1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992-2016 Free Software Foundation, Inc.
4 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
5 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "target.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "df.h"
31 #include "insn-config.h"
32 #include "regs.h"
33 #include "ira.h"
34 #include "ira-int.h"
35 #include "insn-attr.h"
36 #include "cfgbuild.h"
37 #include "sched-int.h"
38 #include "params.h"
39 #include "cselib.h"
41 #ifdef INSN_SCHEDULING
43 /* Holds current parameters for the dependency analyzer. */
44 struct sched_deps_info_def *sched_deps_info;
46 /* The data is specific to the Haifa scheduler. */
47 vec<haifa_deps_insn_data_def>
48 h_d_i_d = vNULL;
50 /* Return the major type present in the DS. */
51 enum reg_note
52 ds_to_dk (ds_t ds)
54 if (ds & DEP_TRUE)
55 return REG_DEP_TRUE;
57 if (ds & DEP_OUTPUT)
58 return REG_DEP_OUTPUT;
60 if (ds & DEP_CONTROL)
61 return REG_DEP_CONTROL;
63 gcc_assert (ds & DEP_ANTI);
65 return REG_DEP_ANTI;
68 /* Return equivalent dep_status. */
69 ds_t
70 dk_to_ds (enum reg_note dk)
72 switch (dk)
74 case REG_DEP_TRUE:
75 return DEP_TRUE;
77 case REG_DEP_OUTPUT:
78 return DEP_OUTPUT;
80 case REG_DEP_CONTROL:
81 return DEP_CONTROL;
83 default:
84 gcc_assert (dk == REG_DEP_ANTI);
85 return DEP_ANTI;
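/* For illustration: ds_to_dk tests the bits in the order true, output,
   control, anti, so a status with both DEP_TRUE and DEP_OUTPUT set yields
   REG_DEP_TRUE, while dk_to_ds maps a single reg_note kind back to the
   corresponding bit, e.g. dk_to_ds (REG_DEP_OUTPUT) == DEP_OUTPUT.  */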
89 /* Functions to operate with dependence information container - dep_t. */
91 /* Init DEP with the arguments. */
92 void
93 init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
95 DEP_PRO (dep) = pro;
96 DEP_CON (dep) = con;
97 DEP_TYPE (dep) = type;
98 DEP_STATUS (dep) = ds;
99 DEP_COST (dep) = UNKNOWN_DEP_COST;
100 DEP_NONREG (dep) = 0;
101 DEP_MULTIPLE (dep) = 0;
102 DEP_REPLACE (dep) = NULL;
105 /* Init DEP with the arguments.
106 While most of the scheduler (including targets) only need the major type
107 of the dependency, it is convenient to hide full dep_status from them. */
108 void
109 init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
111 ds_t ds;
113 if ((current_sched_info->flags & USE_DEPS_LIST))
114 ds = dk_to_ds (kind);
115 else
116 ds = 0;
118 init_dep_1 (dep, pro, con, kind, ds);
121 /* Make a copy of FROM in TO. */
122 static void
123 copy_dep (dep_t to, dep_t from)
125 memcpy (to, from, sizeof (*to));
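/* A minimal usage sketch (producer_insn and consumer_insn are placeholder
   names for whatever rtx_insn pointers the caller has):

     dep_def _dep, *dep = &_dep;
     init_dep (dep, producer_insn, consumer_insn, REG_DEP_TRUE);

   after which the stack-allocated dep can be passed to the routines below,
   e.g. sd_add_dep or maybe_add_or_update_dep_1; see haifa_note_dep and
   sd_copy_back_deps for real uses of this pattern.  */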
128 static void dump_ds (FILE *, ds_t);
130 /* Define flags for dump_dep (). */
132 /* Dump producer of the dependence. */
133 #define DUMP_DEP_PRO (2)
135 /* Dump consumer of the dependence. */
136 #define DUMP_DEP_CON (4)
138 /* Dump type of the dependence. */
139 #define DUMP_DEP_TYPE (8)
141 /* Dump status of the dependence. */
142 #define DUMP_DEP_STATUS (16)
144 /* Dump all information about the dependence. */
145 #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
146 |DUMP_DEP_STATUS)
148 /* Dump DEP to DUMP.
149 FLAGS is a bit mask specifying what information about DEP needs
150 to be printed.
151 If FLAGS has the very first bit set, then dump all information about DEP
152 and propagate this bit into the callee dump functions. */
153 static void
154 dump_dep (FILE *dump, dep_t dep, int flags)
156 if (flags & 1)
157 flags |= DUMP_DEP_ALL;
159 fprintf (dump, "<");
161 if (flags & DUMP_DEP_PRO)
162 fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
164 if (flags & DUMP_DEP_CON)
165 fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
167 if (flags & DUMP_DEP_TYPE)
169 char t;
170 enum reg_note type = DEP_TYPE (dep);
172 switch (type)
174 case REG_DEP_TRUE:
175 t = 't';
176 break;
178 case REG_DEP_OUTPUT:
179 t = 'o';
180 break;
182 case REG_DEP_CONTROL:
183 t = 'c';
184 break;
186 case REG_DEP_ANTI:
187 t = 'a';
188 break;
190 default:
191 gcc_unreachable ();
192 break;
195 fprintf (dump, "%c; ", t);
198 if (flags & DUMP_DEP_STATUS)
200 if (current_sched_info->flags & USE_DEPS_LIST)
201 dump_ds (dump, DEP_STATUS (dep));
204 fprintf (dump, ">");
207 /* Default flags for dump_dep (). */
208 static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
210 /* Dump all fields of DEP to STDERR. */
211 void
212 sd_debug_dep (dep_t dep)
214 dump_dep (stderr, dep, 1);
215 fprintf (stderr, "\n");
218 /* Determine whether DEP is a dependency link of a non-debug insn on a
219 debug insn. */
221 static inline bool
222 depl_on_debug_p (dep_link_t dep)
224 return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
225 && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
228 /* Functions to operate with a single link from the dependencies lists -
229 dep_link_t. */
231 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
232 PREV_NEXT_P. */
233 static void
234 attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
236 dep_link_t next = *prev_nextp;
238 gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
239 && DEP_LINK_NEXT (l) == NULL);
241 /* Init node being inserted. */
242 DEP_LINK_PREV_NEXTP (l) = prev_nextp;
243 DEP_LINK_NEXT (l) = next;
245 /* Fix next node. */
246 if (next != NULL)
248 gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
250 DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
253 /* Fix prev node. */
254 *prev_nextp = l;
257 /* Add dep_link LINK to deps_list L. */
258 static void
259 add_to_deps_list (dep_link_t link, deps_list_t l)
261 attach_dep_link (link, &DEPS_LIST_FIRST (l));
263 /* Don't count debug deps. */
264 if (!depl_on_debug_p (link))
265 ++DEPS_LIST_N_LINKS (l);
268 /* Detach dep_link L from the list. */
269 static void
270 detach_dep_link (dep_link_t l)
272 dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
273 dep_link_t next = DEP_LINK_NEXT (l);
275 *prev_nextp = next;
277 if (next != NULL)
278 DEP_LINK_PREV_NEXTP (next) = prev_nextp;
280 DEP_LINK_PREV_NEXTP (l) = NULL;
281 DEP_LINK_NEXT (l) = NULL;
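/* Note on the representation used by attach_dep_link and detach_dep_link:
   a dep_link stores no pointer to its predecessor, only PREV_NEXTP, the
   address of the NEXT field (or of the list head) that points at it.  That
   lets both routines splice a node in or out with two pointer updates and
   no special case for the head of the list.  */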
284 /* Remove link LINK from list LIST. */
285 static void
286 remove_from_deps_list (dep_link_t link, deps_list_t list)
288 detach_dep_link (link);
290 /* Don't count debug deps. */
291 if (!depl_on_debug_p (link))
292 --DEPS_LIST_N_LINKS (list);
295 /* Move link LINK from list FROM to list TO. */
296 static void
297 move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
299 remove_from_deps_list (link, from);
300 add_to_deps_list (link, to);
303 /* Return true if LINK is not attached to any list. */
304 static bool
305 dep_link_is_detached_p (dep_link_t link)
307 return DEP_LINK_PREV_NEXTP (link) == NULL;
310 /* Pool to hold all dependency nodes (dep_node_t). */
311 static object_allocator<_dep_node> *dn_pool;
313 /* Number of dep_nodes out there. */
314 static int dn_pool_diff = 0;
316 /* Create a dep_node. */
317 static dep_node_t
318 create_dep_node (void)
320 dep_node_t n = dn_pool->allocate ();
321 dep_link_t back = DEP_NODE_BACK (n);
322 dep_link_t forw = DEP_NODE_FORW (n);
324 DEP_LINK_NODE (back) = n;
325 DEP_LINK_NEXT (back) = NULL;
326 DEP_LINK_PREV_NEXTP (back) = NULL;
328 DEP_LINK_NODE (forw) = n;
329 DEP_LINK_NEXT (forw) = NULL;
330 DEP_LINK_PREV_NEXTP (forw) = NULL;
332 ++dn_pool_diff;
334 return n;
337 /* Delete dep_node N. N must not be connected to any deps_list. */
338 static void
339 delete_dep_node (dep_node_t n)
341 gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
342 && dep_link_is_detached_p (DEP_NODE_FORW (n)));
344 XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));
346 --dn_pool_diff;
348 dn_pool->remove (n);
351 /* Pool to hold dependencies lists (deps_list_t). */
352 static object_allocator<_deps_list> *dl_pool;
354 /* Number of deps_lists out there. */
355 static int dl_pool_diff = 0;
357 /* Functions to operate with dependences lists - deps_list_t. */
359 /* Return true if list L is empty. */
360 static bool
361 deps_list_empty_p (deps_list_t l)
363 return DEPS_LIST_N_LINKS (l) == 0;
366 /* Create a new deps_list. */
367 static deps_list_t
368 create_deps_list (void)
370 deps_list_t l = dl_pool->allocate ();
372 DEPS_LIST_FIRST (l) = NULL;
373 DEPS_LIST_N_LINKS (l) = 0;
375 ++dl_pool_diff;
376 return l;
379 /* Free deps_list L. */
380 static void
381 free_deps_list (deps_list_t l)
383 gcc_assert (deps_list_empty_p (l));
385 --dl_pool_diff;
387 dl_pool->remove (l);
390 /* Return true if there are no dep_nodes or deps_lists out there.
391 After the region is scheduled, all the dependency nodes and lists
392 should [generally] have been returned to the pools. */
393 bool
394 deps_pools_are_empty_p (void)
396 return dn_pool_diff == 0 && dl_pool_diff == 0;
399 /* Remove all elements from L. */
400 static void
401 clear_deps_list (deps_list_t l)
405 dep_link_t link = DEPS_LIST_FIRST (l);
407 if (link == NULL)
408 break;
410 remove_from_deps_list (link, l);
412 while (1);
415 /* Decide whether a dependency should be treated as a hard or a speculative
416 dependency. */
417 static bool
418 dep_spec_p (dep_t dep)
420 if (current_sched_info->flags & DO_SPECULATION)
422 if (DEP_STATUS (dep) & SPECULATIVE)
423 return true;
425 if (current_sched_info->flags & DO_PREDICATION)
427 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
428 return true;
430 if (DEP_REPLACE (dep) != NULL)
431 return true;
432 return false;
435 static regset reg_pending_sets;
436 static regset reg_pending_clobbers;
437 static regset reg_pending_uses;
438 static regset reg_pending_control_uses;
439 static enum reg_pending_barrier_mode reg_pending_barrier;
441 /* Hard registers implicitly clobbered or used (or may be implicitly
442 clobbered or used) by the currently analyzed insn. For example,
443 insn in its constraint has one register class. Even if there is
444 currently no hard register in the insn, the particular hard
445 register will be in the insn after reload pass because the
446 constraint requires it. */
447 static HARD_REG_SET implicit_reg_pending_clobbers;
448 static HARD_REG_SET implicit_reg_pending_uses;
450 /* To speed up the test for duplicate dependency links we keep a
451 record of dependencies created by add_dependence when the average
452 number of instructions in a basic block is very large.
454 Studies have shown that there are typically around 5 instructions between
455 branches in typical C code. So we can make a guess that the average
456 basic block is approximately 5 instructions long; we will choose 100X
457 the average size as a very large basic block.
459 Each insn has associated bitmaps for its dependencies. Each bitmap
460 has enough entries to represent a dependency on any other insn in
461 the insn chain. If the bitmap for the true-dependency cache is
462 allocated, then the remaining caches are allocated as well. */
463 static bitmap_head *true_dependency_cache = NULL;
464 static bitmap_head *output_dependency_cache = NULL;
465 static bitmap_head *anti_dependency_cache = NULL;
466 static bitmap_head *control_dependency_cache = NULL;
467 static bitmap_head *spec_dependency_cache = NULL;
468 static int cache_size;
470 /* True if we should mark added dependencies as non-register deps. */
471 static bool mark_as_hard;
473 static int deps_may_trap_p (const_rtx);
474 static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
475 static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
476 enum reg_note, bool);
477 static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
478 rtx_insn_list **, int, enum reg_note,
479 bool);
480 static void delete_all_dependences (rtx_insn *);
481 static void chain_to_prev_insn (rtx_insn *);
483 static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
484 static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
485 static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
486 static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);
488 static bool sched_has_condition_p (const rtx_insn *);
489 static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
491 static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
492 rtx, rtx);
493 static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
495 static void check_dep (dep_t, bool);
498 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
500 static int
501 deps_may_trap_p (const_rtx mem)
503 const_rtx addr = XEXP (mem, 0);
505 if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
507 const_rtx t = get_reg_known_value (REGNO (addr));
508 if (t)
509 addr = t;
511 return rtx_addr_can_trap_p (addr);
515 /* Find the condition under which INSN is executed. If REV is not NULL,
516 it is set to TRUE when the returned comparison should be reversed
517 to get the actual condition. */
518 static rtx
519 sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
521 rtx pat = PATTERN (insn);
522 rtx src;
524 if (rev)
525 *rev = false;
527 if (GET_CODE (pat) == COND_EXEC)
528 return COND_EXEC_TEST (pat);
530 if (!any_condjump_p (insn) || !onlyjump_p (insn))
531 return 0;
533 src = SET_SRC (pc_set (insn));
535 if (XEXP (src, 2) == pc_rtx)
536 return XEXP (src, 0);
537 else if (XEXP (src, 1) == pc_rtx)
539 rtx cond = XEXP (src, 0);
540 enum rtx_code revcode = reversed_comparison_code (cond, insn);
542 if (revcode == UNKNOWN)
543 return 0;
545 if (rev)
546 *rev = true;
547 return cond;
550 return 0;
553 /* Return the condition under which INSN does not execute (i.e. the
554 not-taken condition for a conditional branch), or NULL if we cannot
555 find such a condition. The caller should make a copy of the condition
556 before using it. */
557 rtx
558 sched_get_reverse_condition_uncached (const rtx_insn *insn)
560 bool rev;
561 rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
562 if (cond == NULL_RTX)
563 return cond;
564 if (!rev)
566 enum rtx_code revcode = reversed_comparison_code (cond, insn);
567 cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
568 XEXP (cond, 0),
569 XEXP (cond, 1));
571 return cond;
574 /* Caching variant of sched_get_condition_with_rev_uncached.
575 We only do actual work the first time we come here for an insn; the
576 results are cached in INSN_CACHED_COND and INSN_REVERSE_COND. */
577 static rtx
578 sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
580 bool tmp;
582 if (INSN_LUID (insn) == 0)
583 return sched_get_condition_with_rev_uncached (insn, rev);
585 if (INSN_CACHED_COND (insn) == const_true_rtx)
586 return NULL_RTX;
588 if (INSN_CACHED_COND (insn) != NULL_RTX)
590 if (rev)
591 *rev = INSN_REVERSE_COND (insn);
592 return INSN_CACHED_COND (insn);
595 INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
596 INSN_REVERSE_COND (insn) = tmp;
598 if (INSN_CACHED_COND (insn) == NULL_RTX)
600 INSN_CACHED_COND (insn) = const_true_rtx;
601 return NULL_RTX;
604 if (rev)
605 *rev = INSN_REVERSE_COND (insn);
606 return INSN_CACHED_COND (insn);
609 /* True when we can find a condition under which INSN is executed. */
610 static bool
611 sched_has_condition_p (const rtx_insn *insn)
613 return !! sched_get_condition_with_rev (insn, NULL);
618 /* Return nonzero if conditions COND1 and COND2 can never both be true. */
619 static int
620 conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
622 if (COMPARISON_P (cond1)
623 && COMPARISON_P (cond2)
624 && GET_CODE (cond1) ==
625 (rev1==rev2
626 ? reversed_comparison_code (cond2, NULL)
627 : GET_CODE (cond2))
628 && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
629 && XEXP (cond1, 1) == XEXP (cond2, 1))
630 return 1;
631 return 0;
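/* Worked example: with cond1 = (eq (reg X) (const_int 0)),
   cond2 = (ne (reg X) (const_int 0)) and rev1 == rev2, the reversed code of
   cond2 is EQ, which matches GET_CODE (cond1), and the operands compare
   equal, so the two conditions can never hold at once and the function
   returns 1.  */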
634 /* Return true if insn1 and insn2 can never depend on one another because
635 the conditions under which they are executed are mutually exclusive. */
636 bool
637 sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
639 rtx cond1, cond2;
640 bool rev1 = false, rev2 = false;
642 /* df doesn't handle conditional lifetimes entirely correctly;
643 calls mess up the conditional lifetimes. */
644 if (!CALL_P (insn1) && !CALL_P (insn2))
646 cond1 = sched_get_condition_with_rev (insn1, &rev1);
647 cond2 = sched_get_condition_with_rev (insn2, &rev2);
648 if (cond1 && cond2
649 && conditions_mutex_p (cond1, cond2, rev1, rev2)
650 /* Make sure first instruction doesn't affect condition of second
651 instruction if switched. */
652 && !modified_in_p (cond1, insn2)
653 /* Make sure second instruction doesn't affect condition of first
654 instruction if switched. */
655 && !modified_in_p (cond2, insn1))
656 return true;
658 return false;
662 /* Return true if INSN can potentially be speculated with type DS. */
663 bool
664 sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
666 if (HAS_INTERNAL_DEP (insn))
667 return false;
669 if (!NONJUMP_INSN_P (insn))
670 return false;
672 if (SCHED_GROUP_P (insn))
673 return false;
675 if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
676 return false;
678 if (side_effects_p (PATTERN (insn)))
679 return false;
681 if (ds & BE_IN_SPEC)
682 /* The following instructions, which depend on a speculatively scheduled
683 instruction, cannot be speculatively scheduled along with it. */
685 if (may_trap_or_fault_p (PATTERN (insn)))
686 /* If instruction might fault, it cannot be speculatively scheduled.
687 For control speculation it's obvious why and for data speculation
688 it's because the insn might get wrong input if speculation
689 wasn't successful. */
690 return false;
692 if ((ds & BE_IN_DATA)
693 && sched_has_condition_p (insn))
694 /* If this is a predicated instruction, then it cannot be
695 speculatively scheduled. See PR35659. */
696 return false;
699 return true;
702 /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
703 initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
704 and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
705 This function is used to switch sd_iterator to the next list.
706 !!! For internal use only. Might consider moving it to sched-int.h. */
707 void
708 sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
709 deps_list_t *list_ptr, bool *resolved_p_ptr)
711 sd_list_types_def types = *types_ptr;
713 if (types & SD_LIST_HARD_BACK)
715 *list_ptr = INSN_HARD_BACK_DEPS (insn);
716 *resolved_p_ptr = false;
717 *types_ptr = types & ~SD_LIST_HARD_BACK;
719 else if (types & SD_LIST_SPEC_BACK)
721 *list_ptr = INSN_SPEC_BACK_DEPS (insn);
722 *resolved_p_ptr = false;
723 *types_ptr = types & ~SD_LIST_SPEC_BACK;
725 else if (types & SD_LIST_FORW)
727 *list_ptr = INSN_FORW_DEPS (insn);
728 *resolved_p_ptr = false;
729 *types_ptr = types & ~SD_LIST_FORW;
731 else if (types & SD_LIST_RES_BACK)
733 *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
734 *resolved_p_ptr = true;
735 *types_ptr = types & ~SD_LIST_RES_BACK;
737 else if (types & SD_LIST_RES_FORW)
739 *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
740 *resolved_p_ptr = true;
741 *types_ptr = types & ~SD_LIST_RES_FORW;
743 else
745 *list_ptr = NULL;
746 *resolved_p_ptr = false;
747 *types_ptr = SD_LIST_NONE;
751 /* Return the summary size of INSN's lists defined by LIST_TYPES. */
752 int
753 sd_lists_size (const_rtx insn, sd_list_types_def list_types)
755 int size = 0;
757 while (list_types != SD_LIST_NONE)
759 deps_list_t list;
760 bool resolved_p;
762 sd_next_list (insn, &list_types, &list, &resolved_p);
763 if (list)
764 size += DEPS_LIST_N_LINKS (list);
767 return size;
770 /* Return true if INSN's lists defined by LIST_TYPES are all empty. */
772 bool
773 sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
775 while (list_types != SD_LIST_NONE)
777 deps_list_t list;
778 bool resolved_p;
780 sd_next_list (insn, &list_types, &list, &resolved_p);
781 if (!deps_list_empty_p (list))
782 return false;
785 return true;
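/* A minimal sketch of walking these lists with the sd_iterator interface
   (FOR_EACH_DEP comes from sched-int.h); for example, to visit every
   backward dependence of an insn:

     sd_iterator_def sd_it;
     dep_t dep;
     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       handle (DEP_PRO (dep), DEP_TYPE (dep));

   where "handle" is a placeholder for whatever the caller does with the
   producer and the dependence type; chain_to_prev_insn below uses exactly
   this pattern.  */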
788 /* Initialize data for INSN. */
789 void
790 sd_init_insn (rtx_insn *insn)
792 INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
793 INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
794 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
795 INSN_FORW_DEPS (insn) = create_deps_list ();
796 INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
798 /* ??? It would be nice to allocate dependency caches here. */
801 /* Free data for INSN. */
802 void
803 sd_finish_insn (rtx_insn *insn)
805 /* ??? It would be nice to deallocate dependency caches here. */
807 free_deps_list (INSN_HARD_BACK_DEPS (insn));
808 INSN_HARD_BACK_DEPS (insn) = NULL;
810 free_deps_list (INSN_SPEC_BACK_DEPS (insn));
811 INSN_SPEC_BACK_DEPS (insn) = NULL;
813 free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
814 INSN_RESOLVED_BACK_DEPS (insn) = NULL;
816 free_deps_list (INSN_FORW_DEPS (insn));
817 INSN_FORW_DEPS (insn) = NULL;
819 free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
820 INSN_RESOLVED_FORW_DEPS (insn) = NULL;
823 /* Find a dependency between producer PRO and consumer CON.
824 Search through resolved dependency lists if RESOLVED_P is true.
825 If no such dependency is found return NULL,
826 otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
827 with an iterator pointing to it. */
828 static dep_t
829 sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
830 sd_iterator_def *sd_it_ptr)
832 sd_list_types_def pro_list_type;
833 sd_list_types_def con_list_type;
834 sd_iterator_def sd_it;
835 dep_t dep;
836 bool found_p = false;
838 if (resolved_p)
840 pro_list_type = SD_LIST_RES_FORW;
841 con_list_type = SD_LIST_RES_BACK;
843 else
845 pro_list_type = SD_LIST_FORW;
846 con_list_type = SD_LIST_BACK;
849 /* Walk through either the back list of CON or the forw list of PRO,
850 depending on which one is shorter. */
851 if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
853 /* Find the dep_link with producer PRO in consumer's back_deps. */
854 FOR_EACH_DEP (con, con_list_type, sd_it, dep)
855 if (DEP_PRO (dep) == pro)
857 found_p = true;
858 break;
861 else
863 /* Find the dep_link with consumer CON in producer's forw_deps. */
864 FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
865 if (DEP_CON (dep) == con)
867 found_p = true;
868 break;
872 if (found_p)
874 if (sd_it_ptr != NULL)
875 *sd_it_ptr = sd_it;
877 return dep;
880 return NULL;
883 /* Find a dependency between producer PRO and consumer CON.
884 Use the dependency caches, if available, to check whether the
885 dependency is present at all. Search through resolved dependency lists
886 if RESOLVED_P is true. Return the dependency, or NULL if none was found. */
887 dep_t
888 sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
890 if (true_dependency_cache != NULL)
891 /* Avoiding the list walk below can cut compile times dramatically
892 for some code. */
894 int elem_luid = INSN_LUID (pro);
895 int insn_luid = INSN_LUID (con);
897 if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
898 && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
899 && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
900 && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
901 return NULL;
904 return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
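/* The bitmaps consulted above are the caches declared near the top of this
   file: element INSN_LUID (con) of each *_dependency_cache array is a
   bitmap in which bit INSN_LUID (pro) is set once a dependence of that kind
   from PRO to CON has been recorded, so a miss in all four bitmaps proves
   there is no dependence without walking any list.  */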
907 /* Add or update a dependence described by DEP.
908 MEM1 and MEM2, if non-null, correspond to memory locations in case of
909 data speculation.
911 The function returns a value indicating if an old entry has been changed
912 or a new entry has been added to insn's backward deps.
914 This function merely checks if producer and consumer are the same insn
915 and doesn't create a dep in this case. Actual manipulation of
916 dependence data structures is performed in add_or_update_dep_1. */
917 static enum DEPS_ADJUST_RESULT
918 maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
920 rtx_insn *elem = DEP_PRO (dep);
921 rtx_insn *insn = DEP_CON (dep);
923 gcc_assert (INSN_P (insn) && INSN_P (elem));
925 /* Don't depend an insn on itself. */
926 if (insn == elem)
928 if (sched_deps_info->generate_spec_deps)
929 /* INSN has an internal dependence, which we can't overcome. */
930 HAS_INTERNAL_DEP (insn) = 1;
932 return DEP_NODEP;
935 return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
938 /* Ask dependency caches what needs to be done for dependence DEP.
939 Return DEP_CREATED if new dependence should be created and there is no
940 need to try to find one searching the dependencies lists.
941 Return DEP_PRESENT if there already is a dependence described by DEP and
942 hence nothing is to be done.
943 Return DEP_CHANGED if there already is a dependence, but it should be
944 updated to incorporate additional information from DEP. */
945 static enum DEPS_ADJUST_RESULT
946 ask_dependency_caches (dep_t dep)
948 int elem_luid = INSN_LUID (DEP_PRO (dep));
949 int insn_luid = INSN_LUID (DEP_CON (dep));
951 gcc_assert (true_dependency_cache != NULL
952 && output_dependency_cache != NULL
953 && anti_dependency_cache != NULL
954 && control_dependency_cache != NULL);
956 if (!(current_sched_info->flags & USE_DEPS_LIST))
958 enum reg_note present_dep_type;
960 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
961 present_dep_type = REG_DEP_TRUE;
962 else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
963 present_dep_type = REG_DEP_OUTPUT;
964 else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
965 present_dep_type = REG_DEP_ANTI;
966 else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
967 present_dep_type = REG_DEP_CONTROL;
968 else
969 /* There is no existing dep so it should be created. */
970 return DEP_CREATED;
972 if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
973 /* DEP does not add anything to the existing dependence. */
974 return DEP_PRESENT;
976 else
978 ds_t present_dep_types = 0;
980 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
981 present_dep_types |= DEP_TRUE;
982 if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
983 present_dep_types |= DEP_OUTPUT;
984 if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
985 present_dep_types |= DEP_ANTI;
986 if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
987 present_dep_types |= DEP_CONTROL;
989 if (present_dep_types == 0)
990 /* There is no existing dep so it should be created. */
991 return DEP_CREATED;
993 if (!(current_sched_info->flags & DO_SPECULATION)
994 || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
996 if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
997 == present_dep_types)
998 /* DEP does not add anything to the existing dependence. */
999 return DEP_PRESENT;
1001 else
1003 /* Only true dependencies can be data speculative and
1004 only anti dependencies can be control speculative. */
1005 gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
1006 == present_dep_types);
1008 /* if (DEP is SPECULATIVE) then
1009 ..we should update DEP_STATUS
1010 else
1011 ..we should reset existing dep to non-speculative. */
1015 return DEP_CHANGED;
1018 /* Set dependency caches according to DEP. */
1019 static void
1020 set_dependency_caches (dep_t dep)
1022 int elem_luid = INSN_LUID (DEP_PRO (dep));
1023 int insn_luid = INSN_LUID (DEP_CON (dep));
1025 if (!(current_sched_info->flags & USE_DEPS_LIST))
1027 switch (DEP_TYPE (dep))
1029 case REG_DEP_TRUE:
1030 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1031 break;
1033 case REG_DEP_OUTPUT:
1034 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1035 break;
1037 case REG_DEP_ANTI:
1038 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1039 break;
1041 case REG_DEP_CONTROL:
1042 bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1043 break;
1045 default:
1046 gcc_unreachable ();
1049 else
1051 ds_t ds = DEP_STATUS (dep);
1053 if (ds & DEP_TRUE)
1054 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1055 if (ds & DEP_OUTPUT)
1056 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1057 if (ds & DEP_ANTI)
1058 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1059 if (ds & DEP_CONTROL)
1060 bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1062 if (ds & SPECULATIVE)
1064 gcc_assert (current_sched_info->flags & DO_SPECULATION);
1065 bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
1070 /* The type of dependence DEP has changed from OLD_TYPE. Update dependency
1071 caches accordingly. */
1072 static void
1073 update_dependency_caches (dep_t dep, enum reg_note old_type)
1075 int elem_luid = INSN_LUID (DEP_PRO (dep));
1076 int insn_luid = INSN_LUID (DEP_CON (dep));
1078 /* Clear corresponding cache entry because type of the link
1079 may have changed. Keep them if we use_deps_list. */
1080 if (!(current_sched_info->flags & USE_DEPS_LIST))
1082 switch (old_type)
1084 case REG_DEP_OUTPUT:
1085 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1086 break;
1088 case REG_DEP_ANTI:
1089 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1090 break;
1092 case REG_DEP_CONTROL:
1093 bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1094 break;
1096 default:
1097 gcc_unreachable ();
1101 set_dependency_caches (dep);
1104 /* Convert a dependence pointed to by SD_IT to be non-speculative. */
1105 static void
1106 change_spec_dep_to_hard (sd_iterator_def sd_it)
1108 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1109 dep_link_t link = DEP_NODE_BACK (node);
1110 dep_t dep = DEP_NODE_DEP (node);
1111 rtx_insn *elem = DEP_PRO (dep);
1112 rtx_insn *insn = DEP_CON (dep);
1114 move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1116 DEP_STATUS (dep) &= ~SPECULATIVE;
1118 if (true_dependency_cache != NULL)
1119 /* Clear the cache entry. */
1120 bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1121 INSN_LUID (elem));
1124 /* Update DEP to incorporate information from NEW_DEP.
1125 SD_IT points to DEP in case it should be moved to another list.
1126 MEM1 and MEM2, if nonnull, correspond to memory locations in case a
1127 data-speculative dependence should be updated. */
1128 static enum DEPS_ADJUST_RESULT
1129 update_dep (dep_t dep, dep_t new_dep,
1130 sd_iterator_def sd_it ATTRIBUTE_UNUSED,
1131 rtx mem1 ATTRIBUTE_UNUSED,
1132 rtx mem2 ATTRIBUTE_UNUSED)
1134 enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1135 enum reg_note old_type = DEP_TYPE (dep);
1136 bool was_spec = dep_spec_p (dep);
1138 DEP_NONREG (dep) |= DEP_NONREG (new_dep);
1139 DEP_MULTIPLE (dep) = 1;
1141 /* If this is a more restrictive type of dependence than the
1142 existing one, then change the existing dependence to this
1143 type. */
1144 if ((int) DEP_TYPE (new_dep) < (int) old_type)
1146 DEP_TYPE (dep) = DEP_TYPE (new_dep);
1147 res = DEP_CHANGED;
1150 if (current_sched_info->flags & USE_DEPS_LIST)
1151 /* Update DEP_STATUS. */
1153 ds_t dep_status = DEP_STATUS (dep);
1154 ds_t ds = DEP_STATUS (new_dep);
1155 ds_t new_status = ds | dep_status;
1157 if (new_status & SPECULATIVE)
1159 /* Either existing dep or a dep we're adding or both are
1160 speculative. */
1161 if (!(ds & SPECULATIVE)
1162 || !(dep_status & SPECULATIVE))
1163 /* The new dep can't be speculative. */
1164 new_status &= ~SPECULATIVE;
1165 else
1167 /* Both are speculative. Merge probabilities. */
1168 if (mem1 != NULL)
1170 dw_t dw;
1172 dw = estimate_dep_weak (mem1, mem2);
1173 ds = set_dep_weak (ds, BEGIN_DATA, dw);
1176 new_status = ds_merge (dep_status, ds);
1180 ds = new_status;
1182 if (dep_status != ds)
1184 DEP_STATUS (dep) = ds;
1185 res = DEP_CHANGED;
1189 if (was_spec && !dep_spec_p (dep))
1190 /* The old dep was speculative, but now it isn't. */
1191 change_spec_dep_to_hard (sd_it);
1193 if (true_dependency_cache != NULL
1194 && res == DEP_CHANGED)
1195 update_dependency_caches (dep, old_type);
1197 return res;
1200 /* Add or update a dependence described by DEP.
1201 MEM1 and MEM2, if non-null, correspond to memory locations in case of
1202 data speculation.
1204 The function returns a value indicating if an old entry has been changed
1205 or a new entry has been added to insn's backward deps or nothing has
1206 been updated at all. */
1207 static enum DEPS_ADJUST_RESULT
1208 add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1209 rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1211 bool maybe_present_p = true;
1212 bool present_p = false;
1214 gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1215 && DEP_PRO (new_dep) != DEP_CON (new_dep));
1217 if (flag_checking)
1218 check_dep (new_dep, mem1 != NULL);
1220 if (true_dependency_cache != NULL)
1222 switch (ask_dependency_caches (new_dep))
1224 case DEP_PRESENT:
1225 dep_t present_dep;
1226 sd_iterator_def sd_it;
1228 present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1229 DEP_CON (new_dep),
1230 resolved_p, &sd_it);
1231 DEP_MULTIPLE (present_dep) = 1;
1232 return DEP_PRESENT;
1234 case DEP_CHANGED:
1235 maybe_present_p = true;
1236 present_p = true;
1237 break;
1239 case DEP_CREATED:
1240 maybe_present_p = false;
1241 present_p = false;
1242 break;
1244 default:
1245 gcc_unreachable ();
1246 break;
1250 /* Check that we don't already have this dependence. */
1251 if (maybe_present_p)
1253 dep_t present_dep;
1254 sd_iterator_def sd_it;
1256 gcc_assert (true_dependency_cache == NULL || present_p);
1258 present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1259 DEP_CON (new_dep),
1260 resolved_p, &sd_it);
1262 if (present_dep != NULL)
1263 /* We found an existing dependency between ELEM and INSN. */
1264 return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1265 else
1266 /* We didn't find a dep; it shouldn't be present in the cache. */
1267 gcc_assert (!present_p);
1270 /* Might want to check one level of transitivity to save conses.
1271 This check should be done in maybe_add_or_update_dep_1.
1272 Since we made it to add_or_update_dep_1, we must create
1273 (or update) a link. */
1275 if (mem1 != NULL_RTX)
1277 gcc_assert (sched_deps_info->generate_spec_deps);
1278 DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1279 estimate_dep_weak (mem1, mem2));
1282 sd_add_dep (new_dep, resolved_p);
1284 return DEP_CREATED;
1287 /* Initialize BACK_LIST_PTR with consumer's backward list and
1288 FORW_LIST_PTR with producer's forward list. If RESOLVED_P is true
1289 initialize with lists that hold resolved deps. */
1290 static void
1291 get_back_and_forw_lists (dep_t dep, bool resolved_p,
1292 deps_list_t *back_list_ptr,
1293 deps_list_t *forw_list_ptr)
1295 rtx_insn *con = DEP_CON (dep);
1297 if (!resolved_p)
1299 if (dep_spec_p (dep))
1300 *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1301 else
1302 *back_list_ptr = INSN_HARD_BACK_DEPS (con);
1304 *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1306 else
1308 *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1309 *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1313 /* Add dependence described by DEP.
1314 If RESOLVED_P is true treat the dependence as a resolved one. */
1315 void
1316 sd_add_dep (dep_t dep, bool resolved_p)
1318 dep_node_t n = create_dep_node ();
1319 deps_list_t con_back_deps;
1320 deps_list_t pro_forw_deps;
1321 rtx_insn *elem = DEP_PRO (dep);
1322 rtx_insn *insn = DEP_CON (dep);
1324 gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1326 if ((current_sched_info->flags & DO_SPECULATION) == 0
1327 || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1328 DEP_STATUS (dep) &= ~SPECULATIVE;
1330 copy_dep (DEP_NODE_DEP (n), dep);
1332 get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1334 add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1336 if (flag_checking)
1337 check_dep (dep, false);
1339 add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1341 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1342 in the bitmap caches of dependency information. */
1343 if (true_dependency_cache != NULL)
1344 set_dependency_caches (dep);
1347 /* Add or update a backward dependence described by DEP, using the
1348 resolved lists if RESOLVED_P is true.
1349 This function is a convenience wrapper around add_or_update_dep_1. */
1350 enum DEPS_ADJUST_RESULT
1351 sd_add_or_update_dep (dep_t dep, bool resolved_p)
1353 return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
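/* A minimal sketch of the two entry points above, assuming DEP has already
   been filled in with init_dep or init_dep_1:

     sd_add_dep (dep, false);            creates a node unconditionally
     sd_add_or_update_dep (dep, false);  merges with an existing dep, if any

   sd_add_dep does not look for an existing dependence between the two
   insns; sd_add_or_update_dep goes through add_or_update_dep_1, which does,
   and reports DEP_CREATED, DEP_CHANGED or DEP_PRESENT accordingly.  */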
1356 /* Resolve the dependence pointed to by SD_IT.
1357 SD_IT will advance to the next element. */
1358 void
1359 sd_resolve_dep (sd_iterator_def sd_it)
1361 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1362 dep_t dep = DEP_NODE_DEP (node);
1363 rtx_insn *pro = DEP_PRO (dep);
1364 rtx_insn *con = DEP_CON (dep);
1366 if (dep_spec_p (dep))
1367 move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1368 INSN_RESOLVED_BACK_DEPS (con));
1369 else
1370 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1371 INSN_RESOLVED_BACK_DEPS (con));
1373 move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1374 INSN_RESOLVED_FORW_DEPS (pro));
1377 /* Perform the inverse operation of sd_resolve_dep. Restore the dependence
1378 pointed to by SD_IT to unresolved state. */
1379 void
1380 sd_unresolve_dep (sd_iterator_def sd_it)
1382 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1383 dep_t dep = DEP_NODE_DEP (node);
1384 rtx_insn *pro = DEP_PRO (dep);
1385 rtx_insn *con = DEP_CON (dep);
1387 if (dep_spec_p (dep))
1388 move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1389 INSN_SPEC_BACK_DEPS (con));
1390 else
1391 move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1392 INSN_HARD_BACK_DEPS (con));
1394 move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
1395 INSN_FORW_DEPS (pro));
1398 /* Make TO depend on all the FROM's producers.
1399 If RESOLVED_P is true add dependencies to the resolved lists. */
1400 void
1401 sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
1403 sd_list_types_def list_type;
1404 sd_iterator_def sd_it;
1405 dep_t dep;
1407 list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1409 FOR_EACH_DEP (from, list_type, sd_it, dep)
1411 dep_def _new_dep, *new_dep = &_new_dep;
1413 copy_dep (new_dep, dep);
1414 DEP_CON (new_dep) = to;
1415 sd_add_dep (new_dep, resolved_p);
1419 /* Remove a dependency referred to by SD_IT.
1420 SD_IT will point to the next dependence after removal. */
1421 void
1422 sd_delete_dep (sd_iterator_def sd_it)
1424 dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1425 dep_t dep = DEP_NODE_DEP (n);
1426 rtx_insn *pro = DEP_PRO (dep);
1427 rtx_insn *con = DEP_CON (dep);
1428 deps_list_t con_back_deps;
1429 deps_list_t pro_forw_deps;
1431 if (true_dependency_cache != NULL)
1433 int elem_luid = INSN_LUID (pro);
1434 int insn_luid = INSN_LUID (con);
1436 bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1437 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1438 bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1439 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1441 if (current_sched_info->flags & DO_SPECULATION)
1442 bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1445 get_back_and_forw_lists (dep, sd_it.resolved_p,
1446 &con_back_deps, &pro_forw_deps);
1448 remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1449 remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1451 delete_dep_node (n);
1454 /* Dump size of the lists. */
1455 #define DUMP_LISTS_SIZE (2)
1457 /* Dump dependencies of the lists. */
1458 #define DUMP_LISTS_DEPS (4)
1460 /* Dump all information about the lists. */
1461 #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1463 /* Dump deps_lists of INSN specified by TYPES to DUMP.
1464 FLAGS is a bit mask specifying what information about the lists needs
1465 to be printed.
1466 If FLAGS has the very first bit set, then dump all information about
1467 the lists and propagate this bit into the callee dump functions. */
1468 static void
1469 dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1471 sd_iterator_def sd_it;
1472 dep_t dep;
1473 int all;
1475 all = (flags & 1);
1477 if (all)
1478 flags |= DUMP_LISTS_ALL;
1480 fprintf (dump, "[");
1482 if (flags & DUMP_LISTS_SIZE)
1483 fprintf (dump, "%d; ", sd_lists_size (insn, types));
1485 if (flags & DUMP_LISTS_DEPS)
1487 FOR_EACH_DEP (insn, types, sd_it, dep)
1489 dump_dep (dump, dep, dump_dep_flags | all);
1490 fprintf (dump, " ");
1495 /* Dump all information about deps_lists of INSN specified by TYPES
1496 to STDERR. */
1497 void
1498 sd_debug_lists (rtx insn, sd_list_types_def types)
1500 dump_lists (stderr, insn, types, 1);
1501 fprintf (stderr, "\n");
1504 /* A wrapper around add_dependence_1, to add a dependence of CON on
1505 PRO, with type DEP_TYPE. This function implements special handling
1506 for REG_DEP_CONTROL dependencies. For these, we optionally promote
1507 the type to REG_DEP_ANTI if we can determine that predication is
1508 impossible; otherwise we add additional true dependencies on the
1509 INSN_COND_DEPS list of the jump (which PRO must be). */
1510 void
1511 add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
1513 if (dep_type == REG_DEP_CONTROL
1514 && !(current_sched_info->flags & DO_PREDICATION))
1515 dep_type = REG_DEP_ANTI;
1517 /* A REG_DEP_CONTROL dependence may be eliminated through predication,
1518 so we must also make the insn dependent on the setter of the
1519 condition. */
1520 if (dep_type == REG_DEP_CONTROL)
1522 rtx_insn *real_pro = pro;
1523 rtx_insn *other = real_insn_for_shadow (real_pro);
1524 rtx cond;
1526 if (other != NULL_RTX)
1527 real_pro = other;
1528 cond = sched_get_reverse_condition_uncached (real_pro);
1529 /* Verify that the insn does not use a different value in
1530 the condition register than the one that was present at
1531 the jump. */
1532 if (cond == NULL_RTX)
1533 dep_type = REG_DEP_ANTI;
1534 else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
1536 HARD_REG_SET uses;
1537 CLEAR_HARD_REG_SET (uses);
1538 note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
1539 if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
1540 dep_type = REG_DEP_ANTI;
1542 if (dep_type == REG_DEP_CONTROL)
1544 if (sched_verbose >= 5)
1545 fprintf (sched_dump, "making DEP_CONTROL for %d\n",
1546 INSN_UID (real_pro));
1547 add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
1548 REG_DEP_TRUE, false);
1552 add_dependence_1 (con, pro, dep_type);
1555 /* A convenience wrapper to operate on an entire list. HARD should be
1556 true if DEP_NONREG should be set on newly created dependencies. */
1558 static void
1559 add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
1560 enum reg_note dep_type, bool hard)
1562 mark_as_hard = hard;
1563 for (; list; list = list->next ())
1565 if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
1566 add_dependence (insn, list->insn (), dep_type);
1568 mark_as_hard = false;
1571 /* Similar, but free *LISTP at the same time, when the context
1572 is not readonly. HARD should be true if DEP_NONREG should be set on
1573 newly created dependencies. */
1575 static void
1576 add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
1577 rtx_insn_list **listp,
1578 int uncond, enum reg_note dep_type, bool hard)
1580 add_dependence_list (insn, *listp, uncond, dep_type, hard);
1582 /* We don't want to short-circuit dependencies involving debug
1583 insns, because they may cause actual dependencies to be
1584 disregarded. */
1585 if (deps->readonly || DEBUG_INSN_P (insn))
1586 return;
1588 free_INSN_LIST_list (listp);
1591 /* Remove all occurrences of INSN from LIST. Return the number of
1592 occurrences removed. */
1594 static int
1595 remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
1597 int removed = 0;
1599 while (*listp)
1601 if ((*listp)->insn () == insn)
1603 remove_free_INSN_LIST_node (listp);
1604 removed++;
1605 continue;
1608 listp = (rtx_insn_list **)&XEXP (*listp, 1);
1611 return removed;
1614 /* Same as above, but process two lists at once. */
1615 static int
1616 remove_from_both_dependence_lists (rtx_insn *insn,
1617 rtx_insn_list **listp,
1618 rtx_expr_list **exprp)
1620 int removed = 0;
1622 while (*listp)
1624 if (XEXP (*listp, 0) == insn)
1626 remove_free_INSN_LIST_node (listp);
1627 remove_free_EXPR_LIST_node (exprp);
1628 removed++;
1629 continue;
1632 listp = (rtx_insn_list **)&XEXP (*listp, 1);
1633 exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
1636 return removed;
1639 /* Clear all dependencies for an insn. */
1640 static void
1641 delete_all_dependences (rtx_insn *insn)
1643 sd_iterator_def sd_it;
1644 dep_t dep;
1646 /* The loop below could be optimized to clear the caches and back_deps
1647 in one call, but that would provoke duplication of code from
1648 sd_delete_dep (). */
1650 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1651 sd_iterator_cond (&sd_it, &dep);)
1652 sd_delete_dep (sd_it);
1655 /* All insns in a scheduling group except the first should only have
1656 dependencies on the previous insn in the group. So we find the
1657 first instruction in the scheduling group by walking the dependence
1658 chains backwards. Then we add the dependencies for the group to
1659 the previous nonnote insn. */
1661 static void
1662 chain_to_prev_insn (rtx_insn *insn)
1664 sd_iterator_def sd_it;
1665 dep_t dep;
1666 rtx_insn *prev_nonnote;
1668 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1670 rtx_insn *i = insn;
1671 rtx_insn *pro = DEP_PRO (dep);
1675 i = prev_nonnote_insn (i);
1677 if (pro == i)
1678 goto next_link;
1679 } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1681 if (! sched_insns_conditions_mutex_p (i, pro))
1682 add_dependence (i, pro, DEP_TYPE (dep));
1683 next_link:;
1686 delete_all_dependences (insn);
1688 prev_nonnote = prev_nonnote_nondebug_insn (insn);
1689 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1690 && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1691 add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1694 /* Process an insn's memory dependencies. There are four kinds of
1695 dependencies:
1697 (0) read dependence: read follows read
1698 (1) true dependence: read follows write
1699 (2) output dependence: write follows write
1700 (3) anti dependence: write follows read
1702 We are careful to build only dependencies which actually exist, and
1703 use transitivity to avoid building too many links. */
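/* For example, a load from location M after a store to M is a true
   dependence (1); a second store to M after the first is an output
   dependence (2); and a store to M after a load from M is an anti
   dependence (3).  */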
1705 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1706 The MEM is a memory reference contained within INSN, which we are saving
1707 so that we can do memory aliasing on it. */
1709 static void
1710 add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1711 rtx_insn *insn, rtx mem)
1713 rtx_insn_list **insn_list;
1714 rtx_insn_list *insn_node;
1715 rtx_expr_list **mem_list;
1716 rtx_expr_list *mem_node;
1718 gcc_assert (!deps->readonly);
1719 if (read_p)
1721 insn_list = &deps->pending_read_insns;
1722 mem_list = &deps->pending_read_mems;
1723 if (!DEBUG_INSN_P (insn))
1724 deps->pending_read_list_length++;
1726 else
1728 insn_list = &deps->pending_write_insns;
1729 mem_list = &deps->pending_write_mems;
1730 deps->pending_write_list_length++;
1733 insn_node = alloc_INSN_LIST (insn, *insn_list);
1734 *insn_list = insn_node;
1736 if (sched_deps_info->use_cselib)
1738 mem = shallow_copy_rtx (mem);
1739 XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
1740 GET_MODE (mem), insn);
1742 mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1743 *mem_list = mem_node;
1746 /* Make a dependency between every memory reference on the pending lists
1747 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1748 dependencies for a read operation, similarly with FOR_WRITE. */
1750 static void
1751 flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
1752 int for_write)
1754 if (for_write)
1756 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1757 1, REG_DEP_ANTI, true);
1758 if (!deps->readonly)
1760 free_EXPR_LIST_list (&deps->pending_read_mems);
1761 deps->pending_read_list_length = 0;
1765 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1766 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1767 true);
1769 add_dependence_list_and_free (deps, insn,
1770 &deps->last_pending_memory_flush, 1,
1771 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1772 true);
1774 add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
1775 REG_DEP_ANTI, true);
1777 if (DEBUG_INSN_P (insn))
1779 if (for_write)
1780 free_INSN_LIST_list (&deps->pending_read_insns);
1781 free_INSN_LIST_list (&deps->pending_write_insns);
1782 free_INSN_LIST_list (&deps->last_pending_memory_flush);
1783 free_INSN_LIST_list (&deps->pending_jump_insns);
1786 if (!deps->readonly)
1788 free_EXPR_LIST_list (&deps->pending_write_mems);
1789 deps->pending_write_list_length = 0;
1791 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1792 deps->pending_flush_length = 1;
1794 mark_as_hard = false;
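/* In other words, flushing turns INSN into a memory barrier: INSN is made
   dependent (with anti or output dependences) on every insn recorded on the
   pending read/write and pending jump lists, the pending lists are emptied
   unless the context is readonly, and INSN is remembered as the new
   last_pending_memory_flush so that later memory references will depend on
   it.  */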
1797 /* Instruction whose dependencies we are analyzing. */
1798 static rtx_insn *cur_insn = NULL;
1800 /* Implement hooks for haifa scheduler. */
1802 static void
1803 haifa_start_insn (rtx_insn *insn)
1805 gcc_assert (insn && !cur_insn);
1807 cur_insn = insn;
1810 static void
1811 haifa_finish_insn (void)
1813 cur_insn = NULL;
1816 void
1817 haifa_note_reg_set (int regno)
1819 SET_REGNO_REG_SET (reg_pending_sets, regno);
1822 void
1823 haifa_note_reg_clobber (int regno)
1825 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1828 void
1829 haifa_note_reg_use (int regno)
1831 SET_REGNO_REG_SET (reg_pending_uses, regno);
1834 static void
1835 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
1837 if (!(ds & SPECULATIVE))
1839 mem = NULL_RTX;
1840 pending_mem = NULL_RTX;
1842 else
1843 gcc_assert (ds & BEGIN_DATA);
1846 dep_def _dep, *dep = &_dep;
1848 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1849 current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1850 DEP_NONREG (dep) = 1;
1851 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1856 static void
1857 haifa_note_dep (rtx_insn *elem, ds_t ds)
1859 dep_def _dep;
1860 dep_t dep = &_dep;
1862 init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1863 if (mark_as_hard)
1864 DEP_NONREG (dep) = 1;
1865 maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1868 static void
1869 note_reg_use (int r)
1871 if (sched_deps_info->note_reg_use)
1872 sched_deps_info->note_reg_use (r);
1875 static void
1876 note_reg_set (int r)
1878 if (sched_deps_info->note_reg_set)
1879 sched_deps_info->note_reg_set (r);
1882 static void
1883 note_reg_clobber (int r)
1885 if (sched_deps_info->note_reg_clobber)
1886 sched_deps_info->note_reg_clobber (r);
1889 static void
1890 note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
1892 if (sched_deps_info->note_mem_dep)
1893 sched_deps_info->note_mem_dep (m1, m2, e, ds);
1896 static void
1897 note_dep (rtx_insn *e, ds_t ds)
1899 if (sched_deps_info->note_dep)
1900 sched_deps_info->note_dep (e, ds);
1903 /* Return the reg_note corresponding to DS. */
1904 enum reg_note
1905 ds_to_dt (ds_t ds)
1907 if (ds & DEP_TRUE)
1908 return REG_DEP_TRUE;
1909 else if (ds & DEP_OUTPUT)
1910 return REG_DEP_OUTPUT;
1911 else if (ds & DEP_ANTI)
1912 return REG_DEP_ANTI;
1913 else
1915 gcc_assert (ds & DEP_CONTROL);
1916 return REG_DEP_CONTROL;
1922 /* Functions for computation of info needed for register pressure
1923 sensitive insn scheduling. */
1926 /* Allocate and return reg_use_data structure for REGNO and INSN. */
1927 static struct reg_use_data *
1928 create_insn_reg_use (int regno, rtx_insn *insn)
1930 struct reg_use_data *use;
1932 use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1933 use->regno = regno;
1934 use->insn = insn;
1935 use->next_insn_use = INSN_REG_USE_LIST (insn);
1936 INSN_REG_USE_LIST (insn) = use;
1937 return use;
1940 /* Allocate reg_set_data structure for REGNO and INSN. */
1941 static void
1942 create_insn_reg_set (int regno, rtx insn)
1944 struct reg_set_data *set;
1946 set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1947 set->regno = regno;
1948 set->insn = insn;
1949 set->next_insn_set = INSN_REG_SET_LIST (insn);
1950 INSN_REG_SET_LIST (insn) = set;
1953 /* Set up insn register uses for INSN and dependency context DEPS. */
1954 static void
1955 setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
1957 unsigned i;
1958 reg_set_iterator rsi;
1959 struct reg_use_data *use, *use2, *next;
1960 struct deps_reg *reg_last;
1962 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1964 if (i < FIRST_PSEUDO_REGISTER
1965 && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1966 continue;
1968 if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1969 && ! REGNO_REG_SET_P (reg_pending_sets, i)
1970 && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1971 /* Ignore use which is not dying. */
1972 continue;
1974 use = create_insn_reg_use (i, insn);
1975 use->next_regno_use = use;
1976 reg_last = &deps->reg_last[i];
1978 /* Create the cycle list of uses. */
1979 for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
1981 use2 = create_insn_reg_use (i, list->insn ());
1982 next = use->next_regno_use;
1983 use->next_regno_use = use2;
1984 use2->next_regno_use = next;
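/* A minimal sketch of walking the structure built above, where USE stands
   for the reg_use_data just created for a dying register: the uses of one
   regno form a circular list through next_regno_use, so

     struct reg_use_data *p = use;
     do
       {
         ... look at p->insn ...
         p = p->next_regno_use;
       }
     while (p != use);

   visits every node on that cycle.  */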
1989 /* Register pressure info for the currently processed insn. */
1990 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1992 /* Return TRUE if INSN has the use structure for REGNO. */
1993 static bool
1994 insn_use_p (rtx insn, int regno)
1996 struct reg_use_data *use;
1998 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1999 if (use->regno == regno)
2000 return true;
2001 return false;
2004 /* Update the register pressure info after birth of pseudo register REGNO
2005 in INSN. Arguments CLOBBER_P and UNUSED_P say, respectively, that
2006 the register is clobbered or unused after the insn. */
2007 static void
2008 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2010 int incr, new_incr;
2011 enum reg_class cl;
2013 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2014 cl = sched_regno_pressure_class[regno];
2015 if (cl != NO_REGS)
2017 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2018 if (clobber_p)
2020 new_incr = reg_pressure_info[cl].clobber_increase + incr;
2021 reg_pressure_info[cl].clobber_increase = new_incr;
2023 else if (unused_p)
2025 new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2026 reg_pressure_info[cl].unused_set_increase = new_incr;
2028 else
2030 new_incr = reg_pressure_info[cl].set_increase + incr;
2031 reg_pressure_info[cl].set_increase = new_incr;
2032 if (! insn_use_p (insn, regno))
2033 reg_pressure_info[cl].change += incr;
2034 create_insn_reg_set (regno, insn);
2036 gcc_assert (new_incr < (1 << INCREASE_BITS));
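/* Worked example: for a pseudo whose pressure class CL needs two hard
   registers in its mode (ira_reg_class_max_nregs[CL][mode] == 2), a normal
   set in INSN adds 2 to reg_pressure_info[CL].set_increase, and also adds 2
   to .change when INSN does not use the pseudo itself; a clobber or an
   unused set instead goes to .clobber_increase or .unused_set_increase and
   leaves .change untouched.  */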
2040 /* Like mark_insn_pseudo_birth, except that NREGS says how many
2041 hard registers are involved in the birth. */
2042 static void
2043 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2044 bool clobber_p, bool unused_p)
2046 enum reg_class cl;
2047 int new_incr, last = regno + nregs;
2049 while (regno < last)
2051 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2052 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2054 cl = sched_regno_pressure_class[regno];
2055 if (cl != NO_REGS)
2057 if (clobber_p)
2059 new_incr = reg_pressure_info[cl].clobber_increase + 1;
2060 reg_pressure_info[cl].clobber_increase = new_incr;
2062 else if (unused_p)
2064 new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2065 reg_pressure_info[cl].unused_set_increase = new_incr;
2067 else
2069 new_incr = reg_pressure_info[cl].set_increase + 1;
2070 reg_pressure_info[cl].set_increase = new_incr;
2071 if (! insn_use_p (insn, regno))
2072 reg_pressure_info[cl].change += 1;
2073 create_insn_reg_set (regno, insn);
2075 gcc_assert (new_incr < (1 << INCREASE_BITS));
2078 regno++;
2082 /* Update the register pressure info after birth of pseudo or hard
2083 register REG in INSN. Arguments CLOBBER_P and UNUSED_P say
2084 respectively that the register is clobbered or unused after the
2085 insn. */
2086 static void
2087 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2089 int regno;
2091 if (GET_CODE (reg) == SUBREG)
2092 reg = SUBREG_REG (reg);
2094 if (! REG_P (reg))
2095 return;
2097 regno = REGNO (reg);
2098 if (regno < FIRST_PSEUDO_REGISTER)
2099 mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
2100 clobber_p, unused_p);
2101 else
2102 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2105 /* Update the register pressure info after death of pseudo register
2106 REGNO. */
2107 static void
2108 mark_pseudo_death (int regno)
2110 int incr;
2111 enum reg_class cl;
2113 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2114 cl = sched_regno_pressure_class[regno];
2115 if (cl != NO_REGS)
2117 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2118 reg_pressure_info[cl].change -= incr;
2122 /* Like mark_pseudo_death except that NREGS says how many hard
2123 registers are involved in the death. */
2124 static void
2125 mark_hard_regno_death (int regno, int nregs)
2127 enum reg_class cl;
2128 int last = regno + nregs;
2130 while (regno < last)
2132 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2133 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2135 cl = sched_regno_pressure_class[regno];
2136 if (cl != NO_REGS)
2137 reg_pressure_info[cl].change -= 1;
2139 regno++;
2143 /* Update the register pressure info after death of pseudo or hard
2144 register REG. */
2145 static void
2146 mark_reg_death (rtx reg)
2148 int regno;
2150 if (GET_CODE (reg) == SUBREG)
2151 reg = SUBREG_REG (reg);
2153 if (! REG_P (reg))
2154 return;
2156 regno = REGNO (reg);
2157 if (regno < FIRST_PSEUDO_REGISTER)
2158 mark_hard_regno_death (regno, REG_NREGS (reg));
2159 else
2160 mark_pseudo_death (regno);
2163 /* Process SETTER of REG. DATA is an insn containing the setter. */
2164 static void
2165 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2167 if (setter != NULL_RTX && GET_CODE (setter) != SET)
2168 return;
2169 mark_insn_reg_birth
2170 ((rtx) data, reg, false,
2171 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2174 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
2175 static void
2176 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2178 if (GET_CODE (setter) == CLOBBER)
2179 mark_insn_reg_birth ((rtx) data, reg, true, false);
2182 /* Set up reg pressure info related to INSN. */
2183 void
2184 init_insn_reg_pressure_info (rtx_insn *insn)
2186 int i, len;
2187 enum reg_class cl;
2188 static struct reg_pressure_data *pressure_info;
2189 rtx link;
2191 gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2193 if (! INSN_P (insn))
2194 return;
2196 for (i = 0; i < ira_pressure_classes_num; i++)
2198 cl = ira_pressure_classes[i];
2199 reg_pressure_info[cl].clobber_increase = 0;
2200 reg_pressure_info[cl].set_increase = 0;
2201 reg_pressure_info[cl].unused_set_increase = 0;
2202 reg_pressure_info[cl].change = 0;
2205 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2207 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2209 if (AUTO_INC_DEC)
2210 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2211 if (REG_NOTE_KIND (link) == REG_INC)
2212 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2214 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2215 if (REG_NOTE_KIND (link) == REG_DEAD)
2216 mark_reg_death (XEXP (link, 0));
2218 len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2219 pressure_info
2220 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2221 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2222 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2223 * sizeof (int), 1);
2224 for (i = 0; i < ira_pressure_classes_num; i++)
2226 cl = ira_pressure_classes[i];
2227 pressure_info[i].clobber_increase
2228 = reg_pressure_info[cl].clobber_increase;
2229 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2230 pressure_info[i].unused_set_increase
2231 = reg_pressure_info[cl].unused_set_increase;
2232 pressure_info[i].change = reg_pressure_info[cl].change;
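/* A hedged sketch of how the per-insn pressure data filled in above
   might be inspected; dump_insn_pressure is a hypothetical helper and
   reg_class_names is assumed to be available for dumping.  */
#if 0
static void
dump_insn_pressure (FILE *f, rtx_insn *insn)
{
  struct reg_pressure_data *p = INSN_REG_PRESSURE (insn);
  for (int i = 0; i < ira_pressure_classes_num; i++)
    fprintf (f, "%s: set +%d, clobber +%d, unused +%d, change %d\n",
             reg_class_names[ira_pressure_classes[i]],
             p[i].set_increase, p[i].clobber_increase,
             p[i].unused_set_increase, p[i].change);
}
#endif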
2239 /* Internal variable for the sched_analyze_[12] () functions.
2240 If it is nonzero, it means that sched_analyze_[12] is looking
2241 at the toplevel SET. */
2242 static bool can_start_lhs_rhs_p;
2244 /* Extend reg info for the deps context DEPS given that
2245 we have just generated a register numbered REGNO. */
2246 static void
2247 extend_deps_reg_info (struct deps_desc *deps, int regno)
2249 int max_regno = regno + 1;
2251 gcc_assert (!reload_completed);
2253 /* In a readonly context, it would not hurt to extend info,
2254 but it should not be needed. */
2255 if (reload_completed && deps->readonly)
2257 deps->max_reg = max_regno;
2258 return;
2261 if (max_regno > deps->max_reg)
2263 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2264 max_regno);
2265 memset (&deps->reg_last[deps->max_reg],
2266 0, (max_regno - deps->max_reg)
2267 * sizeof (struct deps_reg));
2268 deps->max_reg = max_regno;
2272 /* Extends REG_INFO_P if needed. */
2273 void
2274 maybe_extend_reg_info_p (void)
2276 /* Extend REG_INFO_P, if needed. */
2277 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2279 size_t new_reg_info_p_size = max_regno + 128;
2281 gcc_assert (!reload_completed && sel_sched_p ());
2283 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2284 new_reg_info_p_size,
2285 reg_info_p_size,
2286 sizeof (*reg_info_p));
2287 reg_info_p_size = new_reg_info_p_size;
2291 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2292 The type of the reference is specified by REF and can be SET,
2293 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2295 static void
2296 sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
2297 enum rtx_code ref, rtx_insn *insn)
2299 /* We could emit new pseudos in renaming. Extend the reg structures. */
2300 if (!reload_completed && sel_sched_p ()
2301 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2302 extend_deps_reg_info (deps, regno);
2304 maybe_extend_reg_info_p ();
2306 /* A hard reg in a wide mode may really be multiple registers.
2307 If so, mark all of them just like the first. */
2308 if (regno < FIRST_PSEUDO_REGISTER)
2310 int i = hard_regno_nregs[regno][mode];
2311 if (ref == SET)
2313 while (--i >= 0)
2314 note_reg_set (regno + i);
2316 else if (ref == USE)
2318 while (--i >= 0)
2319 note_reg_use (regno + i);
2321 else
2323 while (--i >= 0)
2324 note_reg_clobber (regno + i);
2328 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2329 it does not reload. Ignore these as they have served their
2330 purpose already. */
2331 else if (regno >= deps->max_reg)
2333 enum rtx_code code = GET_CODE (PATTERN (insn));
2334 gcc_assert (code == USE || code == CLOBBER);
2337 else
2339 if (ref == SET)
2340 note_reg_set (regno);
2341 else if (ref == USE)
2342 note_reg_use (regno);
2343 else
2344 note_reg_clobber (regno);
2346 /* Pseudos that are REG_EQUIV to something may be replaced
2347 by that during reloading. We need only add dependencies for
2348 the address in the REG_EQUIV note. */
2349 if (!reload_completed && get_reg_known_equiv_p (regno))
2351 rtx t = get_reg_known_value (regno);
2352 if (MEM_P (t))
2353 sched_analyze_2 (deps, XEXP (t, 0), insn);
2356 /* Don't let it cross a call after scheduling if it doesn't
2357 already cross one. */
2358 if (REG_N_CALLS_CROSSED (regno) == 0)
2360 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2361 deps->sched_before_next_call
2362 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2363 else
2364 add_dependence_list (insn, deps->last_function_call, 1,
2365 REG_DEP_ANTI, false);
2370 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2371 rtx, X, creating all dependencies generated by the write to the
2372 destination of X, and reads of everything mentioned. */
2374 static void
2375 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2377 rtx dest = XEXP (x, 0);
2378 enum rtx_code code = GET_CODE (x);
2379 bool cslr_p = can_start_lhs_rhs_p;
2381 can_start_lhs_rhs_p = false;
2383 gcc_assert (dest);
2384 if (dest == 0)
2385 return;
2387 if (cslr_p && sched_deps_info->start_lhs)
2388 sched_deps_info->start_lhs (dest);
2390 if (GET_CODE (dest) == PARALLEL)
2392 int i;
2394 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2395 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2396 sched_analyze_1 (deps,
2397 gen_rtx_CLOBBER (VOIDmode,
2398 XEXP (XVECEXP (dest, 0, i), 0)),
2399 insn);
2401 if (cslr_p && sched_deps_info->finish_lhs)
2402 sched_deps_info->finish_lhs ();
2404 if (code == SET)
2406 can_start_lhs_rhs_p = cslr_p;
2408 sched_analyze_2 (deps, SET_SRC (x), insn);
2410 can_start_lhs_rhs_p = false;
2413 return;
2416 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2417 || GET_CODE (dest) == ZERO_EXTRACT)
2419 if (GET_CODE (dest) == STRICT_LOW_PART
2420 || GET_CODE (dest) == ZERO_EXTRACT
2421 || df_read_modify_subreg_p (dest))
2423 /* These both read and modify the result. We must handle
2424 them as writes to get proper dependencies for following
2425 instructions. We must handle them as reads to get proper
2426 dependencies from this to previous instructions.
2427 Thus we need to call sched_analyze_2. */
2429 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2431 if (GET_CODE (dest) == ZERO_EXTRACT)
2433 /* The second and third arguments are values read by this insn. */
2434 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2435 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2437 dest = XEXP (dest, 0);
2440 if (REG_P (dest))
2442 int regno = REGNO (dest);
2443 machine_mode mode = GET_MODE (dest);
2445 sched_analyze_reg (deps, regno, mode, code, insn);
2447 #ifdef STACK_REGS
2448 /* Treat all writes to a stack register as modifying the TOS. */
2449 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2451 /* Avoid analyzing the same register twice. */
2452 if (regno != FIRST_STACK_REG)
2453 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2455 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2456 FIRST_STACK_REG);
2458 #endif
2460 else if (MEM_P (dest))
2462 /* Writing memory. */
2463 rtx t = dest;
2465 if (sched_deps_info->use_cselib)
2467 machine_mode address_mode = get_address_mode (dest);
2469 t = shallow_copy_rtx (dest);
2470 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2471 GET_MODE (t), insn);
2472 XEXP (t, 0)
2473 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2474 insn);
2476 t = canon_rtx (t);
2478 /* Pending lists can't get larger with a readonly context. */
2479 if (!deps->readonly
2480 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2481 >= MAX_PENDING_LIST_LENGTH))
2483 /* Flush all pending reads and writes to prevent the pending lists
2484 from getting any larger. Insn scheduling runs too slowly when
2485 these lists get long. When compiling GCC with itself,
2486 this flush occurs 8 times for sparc, and 10 times for m88k using
2487 the default value of 32. */
2488 flush_pending_lists (deps, insn, false, true);
2490 else
2492 rtx_insn_list *pending;
2493 rtx_expr_list *pending_mem;
2495 pending = deps->pending_read_insns;
2496 pending_mem = deps->pending_read_mems;
2497 while (pending)
2499 if (anti_dependence (pending_mem->element (), t)
2500 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2501 note_mem_dep (t, pending_mem->element (), pending->insn (),
2502 DEP_ANTI);
2504 pending = pending->next ();
2505 pending_mem = pending_mem->next ();
2508 pending = deps->pending_write_insns;
2509 pending_mem = deps->pending_write_mems;
2510 while (pending)
2512 if (output_dependence (pending_mem->element (), t)
2513 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2514 note_mem_dep (t, pending_mem->element (),
2515 pending->insn (),
2516 DEP_OUTPUT);
2518 pending = pending->next ();
2519 pending_mem = pending_mem-> next ();
2522 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2523 REG_DEP_ANTI, true);
2524 add_dependence_list (insn, deps->pending_jump_insns, 1,
2525 REG_DEP_CONTROL, true);
2527 if (!deps->readonly)
2528 add_insn_mem_dependence (deps, false, insn, dest);
2530 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2533 if (cslr_p && sched_deps_info->finish_lhs)
2534 sched_deps_info->finish_lhs ();
2536 /* Analyze reads. */
2537 if (GET_CODE (x) == SET)
2539 can_start_lhs_rhs_p = cslr_p;
2541 sched_analyze_2 (deps, SET_SRC (x), insn);
2543 can_start_lhs_rhs_p = false;
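/* A hedged worked example of the flow above: for an insn such as
     (set (mem:SI (reg:SI 100)) (reg:SI 101))
   sched_analyze_1 records the memory write (adding anti and output
   dependencies against pending reads and writes and the pending flush
   list), analyzes the address (reg 100) via sched_analyze_2, and then
   analyzes SET_SRC (reg 101), again via sched_analyze_2, which notes
   the register uses.  */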
2547 /* Analyze the uses of memory and registers in rtx X in INSN. */
2548 static void
2549 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2551 int i;
2552 int j;
2553 enum rtx_code code;
2554 const char *fmt;
2555 bool cslr_p = can_start_lhs_rhs_p;
2557 can_start_lhs_rhs_p = false;
2559 gcc_assert (x);
2560 if (x == 0)
2561 return;
2563 if (cslr_p && sched_deps_info->start_rhs)
2564 sched_deps_info->start_rhs (x);
2566 code = GET_CODE (x);
2568 switch (code)
2570 CASE_CONST_ANY:
2571 case SYMBOL_REF:
2572 case CONST:
2573 case LABEL_REF:
2574 /* Ignore constants. */
2575 if (cslr_p && sched_deps_info->finish_rhs)
2576 sched_deps_info->finish_rhs ();
2578 return;
2580 case CC0:
2581 if (!HAVE_cc0)
2582 gcc_unreachable ();
2584 /* User of CC0 depends on immediately preceding insn. */
2585 SCHED_GROUP_P (insn) = 1;
2586 /* Don't move CC0 setter to another block (it can set up the
2587 same flag for previous CC0 users which is safe). */
2588 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2590 if (cslr_p && sched_deps_info->finish_rhs)
2591 sched_deps_info->finish_rhs ();
2593 return;
2595 case REG:
2597 int regno = REGNO (x);
2598 machine_mode mode = GET_MODE (x);
2600 sched_analyze_reg (deps, regno, mode, USE, insn);
2602 #ifdef STACK_REGS
2603 /* Treat all reads of a stack register as modifying the TOS. */
2604 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2606 /* Avoid analyzing the same register twice. */
2607 if (regno != FIRST_STACK_REG)
2608 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2609 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2611 #endif
2613 if (cslr_p && sched_deps_info->finish_rhs)
2614 sched_deps_info->finish_rhs ();
2616 return;
2619 case MEM:
2621 /* Reading memory. */
2622 rtx_insn_list *u;
2623 rtx_insn_list *pending;
2624 rtx_expr_list *pending_mem;
2625 rtx t = x;
2627 if (sched_deps_info->use_cselib)
2629 machine_mode address_mode = get_address_mode (t);
2631 t = shallow_copy_rtx (t);
2632 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2633 GET_MODE (t), insn);
2634 XEXP (t, 0)
2635 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2636 insn);
2639 if (!DEBUG_INSN_P (insn))
2641 t = canon_rtx (t);
2642 pending = deps->pending_read_insns;
2643 pending_mem = deps->pending_read_mems;
2644 while (pending)
2646 if (read_dependence (pending_mem->element (), t)
2647 && ! sched_insns_conditions_mutex_p (insn,
2648 pending->insn ()))
2649 note_mem_dep (t, pending_mem->element (),
2650 pending->insn (),
2651 DEP_ANTI);
2653 pending = pending->next ();
2654 pending_mem = pending_mem->next ();
2657 pending = deps->pending_write_insns;
2658 pending_mem = deps->pending_write_mems;
2659 while (pending)
2661 if (true_dependence (pending_mem->element (), VOIDmode, t)
2662 && ! sched_insns_conditions_mutex_p (insn,
2663 pending->insn ()))
2664 note_mem_dep (t, pending_mem->element (),
2665 pending->insn (),
2666 sched_deps_info->generate_spec_deps
2667 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2669 pending = pending->next ();
2670 pending_mem = pending_mem->next ();
2673 for (u = deps->last_pending_memory_flush; u; u = u->next ())
2674 add_dependence (insn, u->insn (), REG_DEP_ANTI);
2676 for (u = deps->pending_jump_insns; u; u = u->next ())
2677 if (deps_may_trap_p (x))
2679 if ((sched_deps_info->generate_spec_deps)
2680 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2682 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2683 MAX_DEP_WEAK);
2685 note_dep (u->insn (), ds);
2687 else
2688 add_dependence (insn, u->insn (), REG_DEP_CONTROL);
2692 /* Always add these dependencies to pending_reads, since
2693 this insn may be followed by a write. */
2694 if (!deps->readonly)
2696 if ((deps->pending_read_list_length
2697 + deps->pending_write_list_length)
2698 >= MAX_PENDING_LIST_LENGTH
2699 && !DEBUG_INSN_P (insn))
2700 flush_pending_lists (deps, insn, true, true);
2701 add_insn_mem_dependence (deps, true, insn, x);
2704 sched_analyze_2 (deps, XEXP (x, 0), insn);
2706 if (cslr_p && sched_deps_info->finish_rhs)
2707 sched_deps_info->finish_rhs ();
2709 return;
2712 /* Force pending stores to memory in case a trap handler needs them.
2713 Also force pending loads from memory; loads and stores can segfault,
2714 and the signal handler won't be triggered if the trap insn was moved
2715 above the load or store insn. */
2716 case TRAP_IF:
2717 flush_pending_lists (deps, insn, true, true);
2718 break;
2720 case PREFETCH:
2721 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2722 reg_pending_barrier = TRUE_BARRIER;
2723 /* A prefetch insn contains addresses only, so if the prefetch
2724 address has no registers, there will be no dependencies on
2725 the prefetch insn. This is wrong from a correctness point of
2726 view, since such a prefetch can be moved below a jump insn,
2727 which usually generates a MOVE_BARRIER that prevents insns
2728 containing registers or memories from being moved through the
2729 barrier. It is also wrong from a performance point of view,
2730 since a prefetch without dependencies will tend to be issued
2731 later instead of earlier. It is hard to generate accurate
2732 dependencies for prefetch insns, as a prefetch has only the
2733 start address, but it is better to have something than
2734 nothing. */
2735 if (!deps->readonly)
2737 rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2738 if (sched_deps_info->use_cselib)
2739 cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2740 add_insn_mem_dependence (deps, true, insn, x);
2742 break;
2744 case UNSPEC_VOLATILE:
2745 flush_pending_lists (deps, insn, true, true);
2746 /* FALLTHRU */
2748 case ASM_OPERANDS:
2749 case ASM_INPUT:
2751 /* Traditional and volatile asm instructions must be considered to use
2752 and clobber all hard registers, all pseudo-registers and all of
2753 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2755 Consider for instance a volatile asm that changes the fpu rounding
2756 mode. An insn should not be moved across this even if it only uses
2757 pseudo-regs because it might give an incorrectly rounded result. */
2758 if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2759 && !DEBUG_INSN_P (insn))
2760 reg_pending_barrier = TRUE_BARRIER;
2762 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2763 We cannot just fall through here since then we would be confused
2764 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2765 a traditional asm as it does in its normal usage. */
2767 if (code == ASM_OPERANDS)
2769 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2770 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2772 if (cslr_p && sched_deps_info->finish_rhs)
2773 sched_deps_info->finish_rhs ();
2775 return;
2777 break;
2780 case PRE_DEC:
2781 case POST_DEC:
2782 case PRE_INC:
2783 case POST_INC:
2784 /* These both read and modify the result. We must handle them as writes
2785 to get proper dependencies for following instructions. We must handle
2786 them as reads to get proper dependencies from this to previous
2787 instructions. Thus we need to pass them to both sched_analyze_1
2788 and sched_analyze_2. We must call sched_analyze_2 first in order
2789 to get the proper antecedent for the read. */
2790 sched_analyze_2 (deps, XEXP (x, 0), insn);
2791 sched_analyze_1 (deps, x, insn);
2793 if (cslr_p && sched_deps_info->finish_rhs)
2794 sched_deps_info->finish_rhs ();
2796 return;
2798 case POST_MODIFY:
2799 case PRE_MODIFY:
2800 /* op0 = op0 + op1 */
2801 sched_analyze_2 (deps, XEXP (x, 0), insn);
2802 sched_analyze_2 (deps, XEXP (x, 1), insn);
2803 sched_analyze_1 (deps, x, insn);
2805 if (cslr_p && sched_deps_info->finish_rhs)
2806 sched_deps_info->finish_rhs ();
2808 return;
2810 default:
2811 break;
2814 /* Other cases: walk the insn. */
2815 fmt = GET_RTX_FORMAT (code);
2816 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2818 if (fmt[i] == 'e')
2819 sched_analyze_2 (deps, XEXP (x, i), insn);
2820 else if (fmt[i] == 'E')
2821 for (j = 0; j < XVECLEN (x, i); j++)
2822 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2825 if (cslr_p && sched_deps_info->finish_rhs)
2826 sched_deps_info->finish_rhs ();
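/* A hedged worked example for the auto-modify cases above: for
     (set (reg:SI 101) (mem:SI (pre_inc:SI (reg:SI 102))))
   the address PRE_INC reaches sched_analyze_2, which first analyzes
   reg 102 as a use and then hands the PRE_INC to sched_analyze_1,
   where the register is treated as written (a clobber), so both the
   read and the modification of reg 102 are reflected in the
   dependencies.  */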
2829 /* Try to group two fusible insns together to prevent the scheduler
2830 from scheduling them apart. */
2832 static void
2833 sched_macro_fuse_insns (rtx_insn *insn)
2835 rtx_insn *prev;
2837 if (any_condjump_p (insn))
2839 unsigned int condreg1, condreg2;
2840 rtx cc_reg_1;
2841 targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2842 cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2843 prev = prev_nonnote_nondebug_insn (insn);
2844 if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
2845 || !prev
2846 || !modified_in_p (cc_reg_1, prev))
2847 return;
2849 else
2851 rtx insn_set = single_set (insn);
2853 prev = prev_nonnote_nondebug_insn (insn);
2854 if (!prev
2855 || !insn_set
2856 || !single_set (prev))
2857 return;
2861 if (targetm.sched.macro_fusion_pair_p (prev, insn))
2862 SCHED_GROUP_P (insn) = 1;
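/* Hedged example: on an x86-like target whose macro_fusion_pair_p hook
   accepts a compare followed by a conditional jump, a pair such as
     (set (reg FLAGS) (compare ...))
     (jump_insn ... (if_then_else (eq (reg FLAGS) (const_int 0)) ...))
   gets SCHED_GROUP_P set on the jump above, so the scheduler keeps the
   two insns adjacent.  */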
2866 /* Get the implicit reg pending clobbers for INSN and save them in TEMP. */
2867 void
2868 get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn)
2870 extract_insn (insn);
2871 preprocess_constraints (insn);
2872 alternative_mask preferred = get_preferred_alternatives (insn);
2873 ira_implicitly_set_insn_hard_regs (temp, preferred);
2874 AND_COMPL_HARD_REG_SET (*temp, ira_no_alloc_regs);
2877 /* Analyze an INSN with pattern X to find all dependencies. */
2878 static void
2879 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
2881 RTX_CODE code = GET_CODE (x);
2882 rtx link;
2883 unsigned i;
2884 reg_set_iterator rsi;
2886 if (! reload_completed)
2888 HARD_REG_SET temp;
2889 get_implicit_reg_pending_clobbers (&temp, insn);
2890 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2893 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2894 && code == SET);
2896 /* Group compare and branch insns for macro-fusion. */
2897 if (targetm.sched.macro_fusion_p
2898 && targetm.sched.macro_fusion_p ())
2899 sched_macro_fuse_insns (insn);
2901 if (may_trap_p (x))
2902 /* Avoid moving trapping instructions across function calls that might
2903 not always return. */
2904 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2905 1, REG_DEP_ANTI, true);
2907 /* We must avoid creating a situation in which two successors of the
2908 current block have different unwind info after scheduling. If at any
2909 point the two paths re-join this leads to incorrect unwind info. */
2910 /* ??? There are certain situations involving a forced frame pointer in
2911 which, with extra effort, we could fix up the unwind info at a later
2912 CFG join. However, it seems better to notice these cases earlier
2913 during prologue generation and avoid marking the frame pointer setup
2914 as frame-related at all. */
2915 if (RTX_FRAME_RELATED_P (insn))
2917 /* Make sure prologue insn is scheduled before next jump. */
2918 deps->sched_before_next_jump
2919 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2921 /* Make sure epilogue insn is scheduled after preceding jumps. */
2922 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2923 true);
2926 if (code == COND_EXEC)
2928 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2930 /* ??? Should be recording conditions so we reduce the number of
2931 false dependencies. */
2932 x = COND_EXEC_CODE (x);
2933 code = GET_CODE (x);
2935 if (code == SET || code == CLOBBER)
2937 sched_analyze_1 (deps, x, insn);
2939 /* Bare clobber insns are used for letting life analysis, reg-stack
2940 and others know that a value is dead. Depend on the last call
2941 instruction so that reg-stack won't get confused. */
2942 if (code == CLOBBER)
2943 add_dependence_list (insn, deps->last_function_call, 1,
2944 REG_DEP_OUTPUT, true);
2946 else if (code == PARALLEL)
2948 for (i = XVECLEN (x, 0); i--;)
2950 rtx sub = XVECEXP (x, 0, i);
2951 code = GET_CODE (sub);
2953 if (code == COND_EXEC)
2955 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2956 sub = COND_EXEC_CODE (sub);
2957 code = GET_CODE (sub);
2959 if (code == SET || code == CLOBBER)
2960 sched_analyze_1 (deps, sub, insn);
2961 else
2962 sched_analyze_2 (deps, sub, insn);
2965 else
2966 sched_analyze_2 (deps, x, insn);
2968 /* Mark registers CLOBBERED or used by called function. */
2969 if (CALL_P (insn))
2971 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2973 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2974 sched_analyze_1 (deps, XEXP (link, 0), insn);
2975 else if (GET_CODE (XEXP (link, 0)) != SET)
2976 sched_analyze_2 (deps, XEXP (link, 0), insn);
2978 /* Don't schedule anything after a tail call; the tail call needs
2979 to use at least all call-saved registers. */
2980 if (SIBLING_CALL_P (insn))
2981 reg_pending_barrier = TRUE_BARRIER;
2982 else if (find_reg_note (insn, REG_SETJMP, NULL))
2983 reg_pending_barrier = MOVE_BARRIER;
2986 if (JUMP_P (insn))
2988 rtx_insn *next = next_nonnote_nondebug_insn (insn);
2989 if (next && BARRIER_P (next))
2990 reg_pending_barrier = MOVE_BARRIER;
2991 else
2993 rtx_insn_list *pending;
2994 rtx_expr_list *pending_mem;
2996 if (sched_deps_info->compute_jump_reg_dependencies)
2998 (*sched_deps_info->compute_jump_reg_dependencies)
2999 (insn, reg_pending_control_uses);
3001 /* Make latency of jump equal to 0 by using anti-dependence. */
3002 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3004 struct deps_reg *reg_last = &deps->reg_last[i];
3005 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3006 false);
3007 add_dependence_list (insn, reg_last->implicit_sets,
3008 0, REG_DEP_ANTI, false);
3009 add_dependence_list (insn, reg_last->clobbers, 0,
3010 REG_DEP_ANTI, false);
3014 /* All memory writes and volatile reads must happen before the
3015 jump. Non-volatile reads must happen before the jump iff
3016 the result is needed by the register use mask computed above. */
3018 pending = deps->pending_write_insns;
3019 pending_mem = deps->pending_write_mems;
3020 while (pending)
3022 if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3023 add_dependence (insn, pending->insn (),
3024 REG_DEP_OUTPUT);
3025 pending = pending->next ();
3026 pending_mem = pending_mem->next ();
3029 pending = deps->pending_read_insns;
3030 pending_mem = deps->pending_read_mems;
3031 while (pending)
3033 if (MEM_VOLATILE_P (pending_mem->element ())
3034 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3035 add_dependence (insn, pending->insn (),
3036 REG_DEP_OUTPUT);
3037 pending = pending->next ();
3038 pending_mem = pending_mem->next ();
3041 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
3042 REG_DEP_ANTI, true);
3043 add_dependence_list (insn, deps->pending_jump_insns, 1,
3044 REG_DEP_ANTI, true);
3048 /* If this instruction can throw an exception, then moving it changes
3049 where block boundaries fall. This is mighty confusing elsewhere.
3050 Therefore, prevent such an instruction from being moved. Same for
3051 non-jump instructions that define block boundaries.
3052 ??? Unclear whether this is still necessary in EBB mode. If not,
3053 add_branch_dependences should be adjusted for RGN mode instead. */
3054 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3055 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3056 reg_pending_barrier = MOVE_BARRIER;
3058 if (sched_pressure != SCHED_PRESSURE_NONE)
3060 setup_insn_reg_uses (deps, insn);
3061 init_insn_reg_pressure_info (insn);
3064 /* Add register dependencies for insn. */
3065 if (DEBUG_INSN_P (insn))
3067 rtx_insn *prev = deps->last_debug_insn;
3068 rtx_insn_list *u;
3070 if (!deps->readonly)
3071 deps->last_debug_insn = insn;
3073 if (prev)
3074 add_dependence (insn, prev, REG_DEP_ANTI);
3076 add_dependence_list (insn, deps->last_function_call, 1,
3077 REG_DEP_ANTI, false);
3079 if (!sel_sched_p ())
3080 for (u = deps->last_pending_memory_flush; u; u = u->next ())
3081 add_dependence (insn, u->insn (), REG_DEP_ANTI);
3083 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3085 struct deps_reg *reg_last = &deps->reg_last[i];
3086 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3087 /* There's no point in making REG_DEP_CONTROL dependencies for
3088 debug insns. */
3089 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3090 false);
3092 if (!deps->readonly)
3093 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3095 CLEAR_REG_SET (reg_pending_uses);
3097 /* Quite often, a debug insn will refer to stuff in the
3098 previous instruction, but the reason we want this
3099 dependency here is to make sure the scheduler doesn't
3100 gratuitously move a debug insn ahead. This could dirty
3101 DF flags and cause additional analysis that wouldn't have
3102 occurred in compilation without debug insns, and such
3103 additional analysis can modify the generated code. */
3104 prev = PREV_INSN (insn);
3106 if (prev && NONDEBUG_INSN_P (prev))
3107 add_dependence (insn, prev, REG_DEP_ANTI);
3109 else
3111 regset_head set_or_clobbered;
3113 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3115 struct deps_reg *reg_last = &deps->reg_last[i];
3116 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3117 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3118 false);
3119 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3120 false);
3122 if (!deps->readonly)
3124 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3125 reg_last->uses_length++;
3129 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3130 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3132 struct deps_reg *reg_last = &deps->reg_last[i];
3133 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3134 add_dependence_list (insn, reg_last->implicit_sets, 0,
3135 REG_DEP_ANTI, false);
3136 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3137 false);
3139 if (!deps->readonly)
3141 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3142 reg_last->uses_length++;
3146 if (targetm.sched.exposed_pipeline)
3148 INIT_REG_SET (&set_or_clobbered);
3149 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3150 reg_pending_sets);
3151 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3153 struct deps_reg *reg_last = &deps->reg_last[i];
3154 rtx list;
3155 for (list = reg_last->uses; list; list = XEXP (list, 1))
3157 rtx other = XEXP (list, 0);
3158 if (INSN_CACHED_COND (other) != const_true_rtx
3159 && refers_to_regno_p (i, INSN_CACHED_COND (other)))
3160 INSN_CACHED_COND (other) = const_true_rtx;
3165 /* If the current insn is conditional, we can't free any
3166 of the lists. */
3167 if (sched_has_condition_p (insn))
3169 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3171 struct deps_reg *reg_last = &deps->reg_last[i];
3172 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3173 false);
3174 add_dependence_list (insn, reg_last->implicit_sets, 0,
3175 REG_DEP_ANTI, false);
3176 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3177 false);
3178 add_dependence_list (insn, reg_last->control_uses, 0,
3179 REG_DEP_CONTROL, false);
3181 if (!deps->readonly)
3183 reg_last->clobbers
3184 = alloc_INSN_LIST (insn, reg_last->clobbers);
3185 reg_last->clobbers_length++;
3188 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3190 struct deps_reg *reg_last = &deps->reg_last[i];
3191 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3192 false);
3193 add_dependence_list (insn, reg_last->implicit_sets, 0,
3194 REG_DEP_ANTI, false);
3195 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3196 false);
3197 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3198 false);
3199 add_dependence_list (insn, reg_last->control_uses, 0,
3200 REG_DEP_CONTROL, false);
3202 if (!deps->readonly)
3203 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3206 else
3208 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3210 struct deps_reg *reg_last = &deps->reg_last[i];
3211 if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
3212 || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
3214 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3215 REG_DEP_OUTPUT, false);
3216 add_dependence_list_and_free (deps, insn,
3217 &reg_last->implicit_sets, 0,
3218 REG_DEP_ANTI, false);
3219 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3220 REG_DEP_ANTI, false);
3221 add_dependence_list_and_free (deps, insn,
3222 &reg_last->control_uses, 0,
3223 REG_DEP_ANTI, false);
3224 add_dependence_list_and_free (deps, insn,
3225 &reg_last->clobbers, 0,
3226 REG_DEP_OUTPUT, false);
3228 if (!deps->readonly)
3230 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3231 reg_last->clobbers_length = 0;
3232 reg_last->uses_length = 0;
3235 else
3237 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3238 false);
3239 add_dependence_list (insn, reg_last->implicit_sets, 0,
3240 REG_DEP_ANTI, false);
3241 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3242 false);
3243 add_dependence_list (insn, reg_last->control_uses, 0,
3244 REG_DEP_CONTROL, false);
3247 if (!deps->readonly)
3249 reg_last->clobbers_length++;
3250 reg_last->clobbers
3251 = alloc_INSN_LIST (insn, reg_last->clobbers);
3254 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3256 struct deps_reg *reg_last = &deps->reg_last[i];
3258 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3259 REG_DEP_OUTPUT, false);
3260 add_dependence_list_and_free (deps, insn,
3261 &reg_last->implicit_sets,
3262 0, REG_DEP_ANTI, false);
3263 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3264 REG_DEP_OUTPUT, false);
3265 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3266 REG_DEP_ANTI, false);
3267 add_dependence_list (insn, reg_last->control_uses, 0,
3268 REG_DEP_CONTROL, false);
3270 if (!deps->readonly)
3272 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3273 reg_last->uses_length = 0;
3274 reg_last->clobbers_length = 0;
3278 if (!deps->readonly)
3280 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3282 struct deps_reg *reg_last = &deps->reg_last[i];
3283 reg_last->control_uses
3284 = alloc_INSN_LIST (insn, reg_last->control_uses);
3289 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3290 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3292 struct deps_reg *reg_last = &deps->reg_last[i];
3293 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3294 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3295 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3296 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3297 false);
3299 if (!deps->readonly)
3300 reg_last->implicit_sets
3301 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3304 if (!deps->readonly)
3306 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3307 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3308 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3309 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3310 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3311 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3312 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3314 /* Set up the pending barrier found. */
3315 deps->last_reg_pending_barrier = reg_pending_barrier;
3318 CLEAR_REG_SET (reg_pending_uses);
3319 CLEAR_REG_SET (reg_pending_clobbers);
3320 CLEAR_REG_SET (reg_pending_sets);
3321 CLEAR_REG_SET (reg_pending_control_uses);
3322 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3323 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3325 /* Add dependencies if a scheduling barrier was found. */
3326 if (reg_pending_barrier)
3328 /* In the case of a barrier, most of the added dependencies are not
3329 real, so we use anti-dependences here. */
3330 if (sched_has_condition_p (insn))
3332 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3334 struct deps_reg *reg_last = &deps->reg_last[i];
3335 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3336 true);
3337 add_dependence_list (insn, reg_last->sets, 0,
3338 reg_pending_barrier == TRUE_BARRIER
3339 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3340 add_dependence_list (insn, reg_last->implicit_sets, 0,
3341 REG_DEP_ANTI, true);
3342 add_dependence_list (insn, reg_last->clobbers, 0,
3343 reg_pending_barrier == TRUE_BARRIER
3344 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3347 else
3349 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3351 struct deps_reg *reg_last = &deps->reg_last[i];
3352 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3353 REG_DEP_ANTI, true);
3354 add_dependence_list_and_free (deps, insn,
3355 &reg_last->control_uses, 0,
3356 REG_DEP_CONTROL, true);
3357 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3358 reg_pending_barrier == TRUE_BARRIER
3359 ? REG_DEP_TRUE : REG_DEP_ANTI,
3360 true);
3361 add_dependence_list_and_free (deps, insn,
3362 &reg_last->implicit_sets, 0,
3363 REG_DEP_ANTI, true);
3364 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3365 reg_pending_barrier == TRUE_BARRIER
3366 ? REG_DEP_TRUE : REG_DEP_ANTI,
3367 true);
3369 if (!deps->readonly)
3371 reg_last->uses_length = 0;
3372 reg_last->clobbers_length = 0;
3377 if (!deps->readonly)
3378 for (i = 0; i < (unsigned)deps->max_reg; i++)
3380 struct deps_reg *reg_last = &deps->reg_last[i];
3381 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3382 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3385 /* Don't flush pending lists on speculative checks for
3386 selective scheduling. */
3387 if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
3388 flush_pending_lists (deps, insn, true, true);
3390 reg_pending_barrier = NOT_A_BARRIER;
3393 /* If a post-call group is still open, see if it should remain so.
3394 This insn must be a simple move of a hard reg to a pseudo or
3395 vice-versa.
3397 We must avoid moving these insns for correctness on targets
3398 with small register classes, and for special registers like
3399 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3400 hard regs for all targets. */
3402 if (deps->in_post_call_group_p)
3404 rtx tmp, set = single_set (insn);
3405 int src_regno, dest_regno;
3407 if (set == NULL)
3409 if (DEBUG_INSN_P (insn))
3410 /* We don't want to mark debug insns as part of the same
3411 sched group. We know they really aren't, but if we use
3412 debug insns to tell that a call group is over, we'll
3413 get different code if debug insns are not there and
3414 instructions that follow seem like they should be part
3415 of the call group.
3417 Also, if we did, chain_to_prev_insn would move the
3418 deps of the debug insn to the call insn, modifying
3419 non-debug post-dependency counts of the debug insn
3420 dependencies and otherwise messing with the scheduling
3421 order.
3423 Instead, let such debug insns be scheduled freely, but
3424 keep the call group open in case there are insns that
3425 should be part of it afterwards. Since we grant debug
3426 insns higher priority than even sched group insns, it
3427 will all turn out all right. */
3428 goto debug_dont_end_call_group;
3429 else
3430 goto end_call_group;
3433 tmp = SET_DEST (set);
3434 if (GET_CODE (tmp) == SUBREG)
3435 tmp = SUBREG_REG (tmp);
3436 if (REG_P (tmp))
3437 dest_regno = REGNO (tmp);
3438 else
3439 goto end_call_group;
3441 tmp = SET_SRC (set);
3442 if (GET_CODE (tmp) == SUBREG)
3443 tmp = SUBREG_REG (tmp);
3444 if ((GET_CODE (tmp) == PLUS
3445 || GET_CODE (tmp) == MINUS)
3446 && REG_P (XEXP (tmp, 0))
3447 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3448 && dest_regno == STACK_POINTER_REGNUM)
3449 src_regno = STACK_POINTER_REGNUM;
3450 else if (REG_P (tmp))
3451 src_regno = REGNO (tmp);
3452 else
3453 goto end_call_group;
3455 if (src_regno < FIRST_PSEUDO_REGISTER
3456 || dest_regno < FIRST_PSEUDO_REGISTER)
3458 if (!deps->readonly
3459 && deps->in_post_call_group_p == post_call_initial)
3460 deps->in_post_call_group_p = post_call;
3462 if (!sel_sched_p () || sched_emulate_haifa_p)
3464 SCHED_GROUP_P (insn) = 1;
3465 CANT_MOVE (insn) = 1;
3468 else
3470 end_call_group:
3471 if (!deps->readonly)
3472 deps->in_post_call_group_p = not_post_call;
3476 debug_dont_end_call_group:
3477 if ((current_sched_info->flags & DO_SPECULATION)
3478 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3479 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3480 be speculated. */
3482 if (sel_sched_p ())
3483 sel_mark_hard_insn (insn);
3484 else
3486 sd_iterator_def sd_it;
3487 dep_t dep;
3489 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3490 sd_iterator_cond (&sd_it, &dep);)
3491 change_spec_dep_to_hard (sd_it);
3495 /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
3496 honor their original ordering. */
3497 if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3499 if (deps->last_args_size)
3500 add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3501 if (!deps->readonly)
3502 deps->last_args_size = insn;
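/* Hedged example for the REG_ARGS_SIZE handling above: when two
   stack-pointer adjustments each carry a REG_ARGS_SIZE note, the later
   one gets an output dependence on the earlier one, so the scheduler
   cannot swap them and invalidate the recorded argument-area sizes.  */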
3506 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3507 longjmp, loop forever, ...). */
3508 /* FIXME: Why can't this function just use flags_from_decl_or_type and
3509 test for ECF_NORETURN? */
3510 static bool
3511 call_may_noreturn_p (rtx_insn *insn)
3513 rtx call;
3515 /* const or pure calls that aren't looping will always return. */
3516 if (RTL_CONST_OR_PURE_CALL_P (insn)
3517 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3518 return false;
3520 call = get_call_rtx_from (insn);
3521 if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3523 rtx symbol = XEXP (XEXP (call, 0), 0);
3524 if (SYMBOL_REF_DECL (symbol)
3525 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3527 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3528 == BUILT_IN_NORMAL)
3529 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3531 case BUILT_IN_BCMP:
3532 case BUILT_IN_BCOPY:
3533 case BUILT_IN_BZERO:
3534 case BUILT_IN_INDEX:
3535 case BUILT_IN_MEMCHR:
3536 case BUILT_IN_MEMCMP:
3537 case BUILT_IN_MEMCPY:
3538 case BUILT_IN_MEMMOVE:
3539 case BUILT_IN_MEMPCPY:
3540 case BUILT_IN_MEMSET:
3541 case BUILT_IN_RINDEX:
3542 case BUILT_IN_STPCPY:
3543 case BUILT_IN_STPNCPY:
3544 case BUILT_IN_STRCAT:
3545 case BUILT_IN_STRCHR:
3546 case BUILT_IN_STRCMP:
3547 case BUILT_IN_STRCPY:
3548 case BUILT_IN_STRCSPN:
3549 case BUILT_IN_STRLEN:
3550 case BUILT_IN_STRNCAT:
3551 case BUILT_IN_STRNCMP:
3552 case BUILT_IN_STRNCPY:
3553 case BUILT_IN_STRPBRK:
3554 case BUILT_IN_STRRCHR:
3555 case BUILT_IN_STRSPN:
3556 case BUILT_IN_STRSTR:
3557 /* Assume certain string/memory builtins always return. */
3558 return false;
3559 default:
3560 break;
3565 /* For all other calls assume that they might not always return. */
3566 return true;
3569 /* Return true if INSN should be made dependent on the previous instruction
3570 group, and if all INSN's dependencies should be moved to the first
3571 instruction of that group. */
3573 static bool
3574 chain_to_prev_insn_p (rtx_insn *insn)
3576 /* INSN forms a group with the previous instruction. */
3577 if (SCHED_GROUP_P (insn))
3578 return true;
3580 /* If the previous instruction clobbers a register R and this one sets
3581 part of R, the clobber was added specifically to help us track the
3582 liveness of R. There's no point scheduling the clobber and leaving
3583 INSN behind, especially if we move the clobber to another block. */
3584 rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
3585 if (prev
3586 && INSN_P (prev)
3587 && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3588 && GET_CODE (PATTERN (prev)) == CLOBBER)
3590 rtx x = XEXP (PATTERN (prev), 0);
3591 if (set_of (x, insn))
3592 return true;
3595 return false;
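/* Hedged example of the clobber case above: in a multi-word move
   expanded as
     (clobber (reg:DI 100))
     (set (subreg:SI (reg:DI 100) 0) ...)
     (set (subreg:SI (reg:DI 100) 4) ...)
   the first partial set is chained to the preceding clobber, so the
   clobber is not scheduled away from the sets it describes.  */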
3598 /* Analyze INSN with DEPS as a context. */
3599 void
3600 deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
3602 if (sched_deps_info->start_insn)
3603 sched_deps_info->start_insn (insn);
3605 /* Record the condition for this insn. */
3606 if (NONDEBUG_INSN_P (insn))
3608 rtx t;
3609 sched_get_condition_with_rev (insn, NULL);
3610 t = INSN_CACHED_COND (insn);
3611 INSN_COND_DEPS (insn) = NULL;
3612 if (reload_completed
3613 && (current_sched_info->flags & DO_PREDICATION)
3614 && COMPARISON_P (t)
3615 && REG_P (XEXP (t, 0))
3616 && CONSTANT_P (XEXP (t, 1)))
3618 unsigned int regno;
3619 int nregs;
3620 rtx_insn_list *cond_deps = NULL;
3621 t = XEXP (t, 0);
3622 regno = REGNO (t);
3623 nregs = REG_NREGS (t);
3624 while (nregs-- > 0)
3626 struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3627 cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
3628 cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
3629 cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
3631 INSN_COND_DEPS (insn) = cond_deps;
3635 if (JUMP_P (insn))
3637 /* Make each JUMP_INSN (but not a speculative check)
3638 a scheduling barrier for memory references. */
3639 if (!deps->readonly
3640 && !(sel_sched_p ()
3641 && sel_insn_is_speculation_check (insn)))
3643 /* Keep the list a reasonable size. */
3644 if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
3645 flush_pending_lists (deps, insn, true, true);
3646 else
3647 deps->pending_jump_insns
3648 = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3651 /* For each insn which shouldn't cross a jump, add a dependence. */
3652 add_dependence_list_and_free (deps, insn,
3653 &deps->sched_before_next_jump, 1,
3654 REG_DEP_ANTI, true);
3656 sched_analyze_insn (deps, PATTERN (insn), insn);
3658 else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3660 sched_analyze_insn (deps, PATTERN (insn), insn);
3662 else if (CALL_P (insn))
3664 int i;
3666 CANT_MOVE (insn) = 1;
3668 if (find_reg_note (insn, REG_SETJMP, NULL))
3670 /* This is setjmp. Assume that all registers, not just
3671 hard registers, may be clobbered by this call. */
3672 reg_pending_barrier = MOVE_BARRIER;
3674 else
3676 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3677 /* A call may read and modify global register variables. */
3678 if (global_regs[i])
3680 SET_REGNO_REG_SET (reg_pending_sets, i);
3681 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3683 /* Other call-clobbered hard regs may be clobbered.
3684 Since we only have a choice between 'might be clobbered'
3685 and 'definitely not clobbered', we must include all
3686 partly call-clobbered registers here. */
3687 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3688 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3689 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3690 /* We don't know what set of fixed registers might be used
3691 by the function; it is certain that the stack pointer
3692 is among them, but be conservative anyway. */
3693 else if (fixed_regs[i])
3694 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3695 /* The frame pointer is normally not used by the function
3696 itself, but by the debugger. */
3697 /* ??? MIPS o32 is an exception. It uses the frame pointer
3698 in the macro expansion of jal but does not represent this
3699 fact in the call_insn rtl. */
3700 else if (i == FRAME_POINTER_REGNUM
3701 || (i == HARD_FRAME_POINTER_REGNUM
3702 && (! reload_completed || frame_pointer_needed)))
3703 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3706 /* For each insn which shouldn't cross a call, add a dependence
3707 between that insn and this call insn. */
3708 add_dependence_list_and_free (deps, insn,
3709 &deps->sched_before_next_call, 1,
3710 REG_DEP_ANTI, true);
3712 sched_analyze_insn (deps, PATTERN (insn), insn);
3714 /* If CALL would be in a sched group, then this will violate
3715 the convention that sched group insns have dependencies only on the
3716 previous instruction.
3718 Of course one can say: "Hey! What about head of the sched group?"
3719 And I will answer: "Basic principles (one dep per insn) are always
3720 the same." */
3721 gcc_assert (!SCHED_GROUP_P (insn));
3723 /* In the absence of interprocedural alias analysis, we must flush
3724 all pending reads and writes, and start new dependencies
3725 from here. But only flush writes for constant calls (which may
3726 be passed a pointer to something we haven't written yet). */
3727 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3729 if (!deps->readonly)
3731 /* Remember the last function call for limiting lifetimes. */
3732 free_INSN_LIST_list (&deps->last_function_call);
3733 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3735 if (call_may_noreturn_p (insn))
3737 /* Remember the last function call that might not always return
3738 normally for limiting moves of trapping insns. */
3739 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3740 deps->last_function_call_may_noreturn
3741 = alloc_INSN_LIST (insn, NULL_RTX);
3744 /* Before reload, begin a post-call group, so as to keep the
3745 lifetimes of hard registers correct. */
3746 if (! reload_completed)
3747 deps->in_post_call_group_p = post_call;
3751 if (sched_deps_info->use_cselib)
3752 cselib_process_insn (insn);
3754 if (sched_deps_info->finish_insn)
3755 sched_deps_info->finish_insn ();
3757 /* Fixup the dependencies in the sched group. */
3758 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3759 && chain_to_prev_insn_p (insn)
3760 && !sel_sched_p ())
3761 chain_to_prev_insn (insn);
3764 /* Initialize DEPS for the new block beginning with HEAD. */
3765 void
3766 deps_start_bb (struct deps_desc *deps, rtx_insn *head)
3768 gcc_assert (!deps->readonly);
3770 /* Before reload, if the previous block ended in a call, show that
3771 we are inside a post-call group, so as to keep the lifetimes of
3772 hard registers correct. */
3773 if (! reload_completed && !LABEL_P (head))
3775 rtx_insn *insn = prev_nonnote_nondebug_insn (head);
3777 if (insn && CALL_P (insn))
3778 deps->in_post_call_group_p = post_call_initial;
3782 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3783 dependencies for each insn. */
3784 void
3785 sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
3787 rtx_insn *insn;
3789 if (sched_deps_info->use_cselib)
3790 cselib_init (CSELIB_RECORD_MEMORY);
3792 deps_start_bb (deps, head);
3794 for (insn = head;; insn = NEXT_INSN (insn))
3797 if (INSN_P (insn))
3799 /* And initialize deps_lists. */
3800 sd_init_insn (insn);
3801 /* Clean up SCHED_GROUP_P which may be set by last
3802 scheduler pass. */
3803 if (SCHED_GROUP_P (insn))
3804 SCHED_GROUP_P (insn) = 0;
3807 deps_analyze_insn (deps, insn);
3809 if (insn == tail)
3811 if (sched_deps_info->use_cselib)
3812 cselib_finish ();
3813 return;
3816 gcc_unreachable ();
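/* A hedged usage sketch, not part of the pass: the typical lifetime of
   a dependence context around the function above.  Prerequisites such
   as luids, sched_deps_info, and per-insn data initialization are
   omitted; the names used are those defined in this file.  */
#if 0
{
  struct deps_desc deps;

  init_deps (&deps, false);              /* allocate reg_last eagerly */
  sched_analyze (&deps, head, tail);     /* build backward dependencies */
  free_deps (&deps);                     /* release all insn lists */
}
#endif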
3819 /* Helper for sched_free_deps ().
3820 Delete INSN's (RESOLVED_P) backward dependencies. */
3821 static void
3822 delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
3824 sd_iterator_def sd_it;
3825 dep_t dep;
3826 sd_list_types_def types;
3828 if (resolved_p)
3829 types = SD_LIST_RES_BACK;
3830 else
3831 types = SD_LIST_BACK;
3833 for (sd_it = sd_iterator_start (insn, types);
3834 sd_iterator_cond (&sd_it, &dep);)
3836 dep_link_t link = *sd_it.linkp;
3837 dep_node_t node = DEP_LINK_NODE (link);
3838 deps_list_t back_list;
3839 deps_list_t forw_list;
3841 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3842 remove_from_deps_list (link, back_list);
3843 delete_dep_node (node);
3847 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3848 deps_lists. */
3849 void
3850 sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
3852 rtx_insn *insn;
3853 rtx_insn *next_tail = NEXT_INSN (tail);
3855 /* We make two passes since some insns may be scheduled before their
3856 dependencies are resolved. */
3857 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3858 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3860 /* Clear forward deps and leave the dep_nodes to the
3861 corresponding back_deps list. */
3862 if (resolved_p)
3863 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3864 else
3865 clear_deps_list (INSN_FORW_DEPS (insn));
3867 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3868 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3870 /* Clear resolved back deps together with its dep_nodes. */
3871 delete_dep_nodes_in_back_deps (insn, resolved_p);
3873 sd_finish_insn (insn);
3877 /* Initialize variables for region data dependence analysis.
3878 When LAZY_REG_LAST is true, do not allocate reg_last array
3879 of struct deps_desc immediately. */
3881 void
3882 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3884 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3886 deps->max_reg = max_reg;
3887 if (lazy_reg_last)
3888 deps->reg_last = NULL;
3889 else
3890 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3891 INIT_REG_SET (&deps->reg_last_in_use);
3893 deps->pending_read_insns = 0;
3894 deps->pending_read_mems = 0;
3895 deps->pending_write_insns = 0;
3896 deps->pending_write_mems = 0;
3897 deps->pending_jump_insns = 0;
3898 deps->pending_read_list_length = 0;
3899 deps->pending_write_list_length = 0;
3900 deps->pending_flush_length = 0;
3901 deps->last_pending_memory_flush = 0;
3902 deps->last_function_call = 0;
3903 deps->last_function_call_may_noreturn = 0;
3904 deps->sched_before_next_call = 0;
3905 deps->sched_before_next_jump = 0;
3906 deps->in_post_call_group_p = not_post_call;
3907 deps->last_debug_insn = 0;
3908 deps->last_args_size = 0;
3909 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3910 deps->readonly = 0;
3913 /* Init only the reg_last field of DEPS, which was not allocated before
3914 because we initialized DEPS lazily. */
3915 void
3916 init_deps_reg_last (struct deps_desc *deps)
3918 gcc_assert (deps && deps->max_reg > 0);
3919 gcc_assert (deps->reg_last == NULL);
3921 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3925 /* Free insn lists found in DEPS. */
3927 void
3928 free_deps (struct deps_desc *deps)
3930 unsigned i;
3931 reg_set_iterator rsi;
3933 /* We set max_reg to 0 when this context was already freed. */
3934 if (deps->max_reg == 0)
3936 gcc_assert (deps->reg_last == NULL);
3937 return;
3939 deps->max_reg = 0;
3941 free_INSN_LIST_list (&deps->pending_read_insns);
3942 free_EXPR_LIST_list (&deps->pending_read_mems);
3943 free_INSN_LIST_list (&deps->pending_write_insns);
3944 free_EXPR_LIST_list (&deps->pending_write_mems);
3945 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3947 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3948 times. For a testcase with 42000 regs and 8000 small basic blocks,
3949 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3950 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3952 struct deps_reg *reg_last = &deps->reg_last[i];
3953 if (reg_last->uses)
3954 free_INSN_LIST_list (&reg_last->uses);
3955 if (reg_last->sets)
3956 free_INSN_LIST_list (&reg_last->sets);
3957 if (reg_last->implicit_sets)
3958 free_INSN_LIST_list (&reg_last->implicit_sets);
3959 if (reg_last->control_uses)
3960 free_INSN_LIST_list (&reg_last->control_uses);
3961 if (reg_last->clobbers)
3962 free_INSN_LIST_list (&reg_last->clobbers);
3964 CLEAR_REG_SET (&deps->reg_last_in_use);
3966 /* As we initialize reg_last lazily, it is possible that we didn't allocate
3967 it at all. */
3968 free (deps->reg_last);
3969 deps->reg_last = NULL;
3971 deps = NULL;
3974 /* Remove INSN from dependence contexts DEPS. */
3975 void
3976 remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
3978 int removed;
3979 unsigned i;
3980 reg_set_iterator rsi;
3982 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3983 &deps->pending_read_mems);
3984 if (!DEBUG_INSN_P (insn))
3985 deps->pending_read_list_length -= removed;
3986 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3987 &deps->pending_write_mems);
3988 deps->pending_write_list_length -= removed;
3990 removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
3991 deps->pending_flush_length -= removed;
3992 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3993 deps->pending_flush_length -= removed;
3995 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3997 struct deps_reg *reg_last = &deps->reg_last[i];
3998 if (reg_last->uses)
3999 remove_from_dependence_list (insn, &reg_last->uses);
4000 if (reg_last->sets)
4001 remove_from_dependence_list (insn, &reg_last->sets);
4002 if (reg_last->implicit_sets)
4003 remove_from_dependence_list (insn, &reg_last->implicit_sets);
4004 if (reg_last->clobbers)
4005 remove_from_dependence_list (insn, &reg_last->clobbers);
4006 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
4007 && !reg_last->clobbers)
4008 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
4011 if (CALL_P (insn))
4013 remove_from_dependence_list (insn, &deps->last_function_call);
4014 remove_from_dependence_list (insn,
4015 &deps->last_function_call_may_noreturn);
4017 remove_from_dependence_list (insn, &deps->sched_before_next_call);
4020 /* Init deps data vector. */
4021 static void
4022 init_deps_data_vector (void)
4024 int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
4025 if (reserve > 0 && ! h_d_i_d.space (reserve))
4026 h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
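/* For example, if sched_max_luid is 100 and h_d_i_d currently holds 40
   elements with no spare capacity, RESERVE is 61 and the vector is grown to
   3 * 100 / 2 = 150 elements (the new entries zero-initialized), leaving
   headroom for later luids.  */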
4029 /* If it is profitable to use them, initialize or extend (depending on
4030 GLOBAL_P) dependency data. */
4031 void
4032 sched_deps_init (bool global_p)
4034 /* Average number of insns in the basic block.
4035 '+ 1' is used to make it nonzero. */
4036 int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
4038 init_deps_data_vector ();
4040 /* We use another caching mechanism for selective scheduling, so
4041 we don't use this one. */
4042 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
4044 /* ?!? We could save some memory by computing a per-region luid mapping
4045 which could reduce both the number of vectors in the cache and the
4046 size of each vector. Instead we just avoid the cache entirely unless
4047 the average number of instructions in a basic block is very high. See
4048 the comment before the declaration of true_dependency_cache for
4049 what we consider "very high". */
4050 cache_size = 0;
4051 extend_dependency_caches (sched_max_luid, true);
4054 if (global_p)
4056 dl_pool = new object_allocator<_deps_list> ("deps_list");
4057 /* Allocate lists for one block at a time. */
4058 dn_pool = new object_allocator<_dep_node> ("dep_node");
4059 /* Allocate nodes for one block at a time. */
4064 /* Create or extend (depending on CREATE_P) dependency caches to
4065 size N. */
4066 void
4067 extend_dependency_caches (int n, bool create_p)
4069 if (create_p || true_dependency_cache)
4071 int i, luid = cache_size + n;
4073 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4074 luid);
4075 output_dependency_cache = XRESIZEVEC (bitmap_head,
4076 output_dependency_cache, luid);
4077 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4078 luid);
4079 control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4080 luid);
4082 if (current_sched_info->flags & DO_SPECULATION)
4083 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4084 luid);
4086 for (i = cache_size; i < luid; i++)
4088 bitmap_initialize (&true_dependency_cache[i], 0);
4089 bitmap_initialize (&output_dependency_cache[i], 0);
4090 bitmap_initialize (&anti_dependency_cache[i], 0);
4091 bitmap_initialize (&control_dependency_cache[i], 0);
4093 if (current_sched_info->flags & DO_SPECULATION)
4094 bitmap_initialize (&spec_dependency_cache[i], 0);
4096 cache_size = luid;
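/* As a concrete illustration: calling extend_dependency_caches (64, true)
   with cache_size == 0 reallocates each per-kind cache to hold 64
   bitmap_heads, initializes entries 0 .. 63 to empty bitmaps (one per insn
   luid), and sets cache_size to 64.  A later call with CREATE_P false only
   resizes if the caches already exist.  */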
4100 /* Finalize dependency information for the whole function. */
4101 void
4102 sched_deps_finish (void)
4104 gcc_assert (deps_pools_are_empty_p ());
4105 delete dn_pool;
4106 delete dl_pool;
4107 dn_pool = NULL;
4108 dl_pool = NULL;
4110 h_d_i_d.release ();
4111 cache_size = 0;
4113 if (true_dependency_cache)
4115 int i;
4117 for (i = 0; i < cache_size; i++)
4119 bitmap_clear (&true_dependency_cache[i]);
4120 bitmap_clear (&output_dependency_cache[i]);
4121 bitmap_clear (&anti_dependency_cache[i]);
4122 bitmap_clear (&control_dependency_cache[i]);
4124 if (sched_deps_info->generate_spec_deps)
4125 bitmap_clear (&spec_dependency_cache[i]);
4127 free (true_dependency_cache);
4128 true_dependency_cache = NULL;
4129 free (output_dependency_cache);
4130 output_dependency_cache = NULL;
4131 free (anti_dependency_cache);
4132 anti_dependency_cache = NULL;
4133 free (control_dependency_cache);
4134 control_dependency_cache = NULL;
4136 if (sched_deps_info->generate_spec_deps)
4138 free (spec_dependency_cache);
4139 spec_dependency_cache = NULL;
4145 /* Initialize some global variables needed by the dependency analysis
4146 code. */
4148 void
4149 init_deps_global (void)
4151 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4152 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
4153 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4154 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4155 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4156 reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4157 reg_pending_barrier = NOT_A_BARRIER;
4159 if (!sel_sched_p () || sched_emulate_haifa_p)
4161 sched_deps_info->start_insn = haifa_start_insn;
4162 sched_deps_info->finish_insn = haifa_finish_insn;
4164 sched_deps_info->note_reg_set = haifa_note_reg_set;
4165 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4166 sched_deps_info->note_reg_use = haifa_note_reg_use;
4168 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4169 sched_deps_info->note_dep = haifa_note_dep;
4173 /* Free everything used by the dependency analysis code. */
4175 void
4176 finish_deps_global (void)
4178 FREE_REG_SET (reg_pending_sets);
4179 FREE_REG_SET (reg_pending_clobbers);
4180 FREE_REG_SET (reg_pending_uses);
4181 FREE_REG_SET (reg_pending_control_uses);
4184 /* Estimate the weakness of dependence between MEM1 and MEM2. */
4185 dw_t
4186 estimate_dep_weak (rtx mem1, rtx mem2)
4188 if (mem1 == mem2)
4189 /* MEMs are the same - don't speculate. */
4190 return MIN_DEP_WEAK;
4192 rtx r1 = XEXP (mem1, 0);
4193 rtx r2 = XEXP (mem2, 0);
4195 if (sched_deps_info->use_cselib)
4197 /* We cannot call rtx_equal_for_cselib_p because the VALUEs might be
4198 dangling at this point, since we never preserve them. Instead we
4199 canonicalize manually to get stable VALUEs out of hashing. */
4200 if (GET_CODE (r1) == VALUE && CSELIB_VAL_PTR (r1))
4201 r1 = canonical_cselib_val (CSELIB_VAL_PTR (r1))->val_rtx;
4202 if (GET_CODE (r2) == VALUE && CSELIB_VAL_PTR (r2))
4203 r2 = canonical_cselib_val (CSELIB_VAL_PTR (r2))->val_rtx;
4206 if (r1 == r2
4207 || (REG_P (r1) && REG_P (r2) && REGNO (r1) == REGNO (r2)))
4208 /* Again, MEMs are the same. */
4209 return MIN_DEP_WEAK;
4210 else if ((REG_P (r1) && !REG_P (r2)) || (!REG_P (r1) && REG_P (r2)))
4211 /* Different addressing modes - reason to be more speculative
4212 than usual. */
4213 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4214 else
4215 /* We can't say anything about the dependence. */
4216 return UNCERTAIN_DEP_WEAK;
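/* Illustrative cases for the above: comparing (mem (reg 100)) with itself or
   with another MEM whose address is (reg 100) yields MIN_DEP_WEAK (the
   accesses certainly overlap, so do not speculate); comparing (mem (reg 100))
   with (mem (plus (reg 101) (const_int 4))) yields the midpoint between
   UNCERTAIN_DEP_WEAK and NO_DEP_WEAK (different addressing modes); comparing
   (mem (reg 100)) with (mem (reg 101)) yields UNCERTAIN_DEP_WEAK.  */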
4219 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4220 This function can handle same INSN and ELEM (INSN == ELEM).
4221 It is a convenience wrapper. */
4222 static void
4223 add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
4225 ds_t ds;
4226 bool internal;
4228 if (dep_type == REG_DEP_TRUE)
4229 ds = DEP_TRUE;
4230 else if (dep_type == REG_DEP_OUTPUT)
4231 ds = DEP_OUTPUT;
4232 else if (dep_type == REG_DEP_CONTROL)
4233 ds = DEP_CONTROL;
4234 else
4236 gcc_assert (dep_type == REG_DEP_ANTI);
4237 ds = DEP_ANTI;
4240 /* When add_dependence is called from inside sched-deps.c, we expect
4241 cur_insn to be non-null. */
4242 internal = cur_insn != NULL;
4243 if (internal)
4244 gcc_assert (insn == cur_insn);
4245 else
4246 cur_insn = insn;
4248 note_dep (elem, ds);
4249 if (!internal)
4250 cur_insn = NULL;
4253 /* Return the weakness of speculative type TYPE in the dep_status DS,
4254 without any range checking, so that malformed input does not cause an ICE. */
4255 static dw_t
4256 get_dep_weak_1 (ds_t ds, ds_t type)
4258 ds = ds & type;
4260 switch (type)
4262 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4263 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4264 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4265 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4266 default: gcc_unreachable ();
4269 return (dw_t) ds;
4272 /* Return weakness of speculative type TYPE in the dep_status DS. */
4273 dw_t
4274 get_dep_weak (ds_t ds, ds_t type)
4276 dw_t dw = get_dep_weak_1 (ds, type);
4278 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4279 return dw;
4282 /* Return a dep_status that is identical to DS, except that speculative
4283 type TYPE will have weakness DW. */
4284 ds_t
4285 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4287 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4289 ds &= ~type;
4290 switch (type)
4292 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4293 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4294 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4295 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4296 default: gcc_unreachable ();
4298 return ds;
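/* get_dep_weak_1 and set_dep_weak are inverses for a given TYPE: the weakness
   DW is stored in the bit-field selected by TYPE (shifted up by the matching
   *_BITS_OFFSET), so get_dep_weak (set_dep_weak (ds, t, dw), t) == dw for any
   dw between MIN_DEP_WEAK and MAX_DEP_WEAK, while the bits outside TYPE are
   left untouched.  */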
4301 /* Return the join of two dep_statuses DS1 and DS2.
4302 If MAX_P is true then choose the greater probability,
4303 otherwise multiply probabilities.
4304 This function assumes that both DS1 and DS2 contain speculative bits. */
4305 static ds_t
4306 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4308 ds_t ds, t;
4310 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4312 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4314 t = FIRST_SPEC_TYPE;
4317 if ((ds1 & t) && !(ds2 & t))
4318 ds |= ds1 & t;
4319 else if (!(ds1 & t) && (ds2 & t))
4320 ds |= ds2 & t;
4321 else if ((ds1 & t) && (ds2 & t))
4323 dw_t dw1 = get_dep_weak (ds1, t);
4324 dw_t dw2 = get_dep_weak (ds2, t);
4325 ds_t dw;
4327 if (!max_p)
4329 dw = ((ds_t) dw1) * ((ds_t) dw2);
4330 dw /= MAX_DEP_WEAK;
4331 if (dw < MIN_DEP_WEAK)
4332 dw = MIN_DEP_WEAK;
4334 else
4336 if (dw1 >= dw2)
4337 dw = dw1;
4338 else
4339 dw = dw2;
4342 ds = set_dep_weak (ds, t, (dw_t) dw);
4345 if (t == LAST_SPEC_TYPE)
4346 break;
4347 t <<= SPEC_TYPE_SHIFT;
4349 while (1);
4351 return ds;
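/* A worked example for ds_merge_1: suppose both DS1 and DS2 carry BEGIN_DATA,
   with weaknesses dw1 and dw2.  With MAX_P false the merged weakness is
   dw1 * dw2 / MAX_DEP_WEAK (clamped below to MIN_DEP_WEAK); with MAX_P true
   it is simply the larger of the two.  Treating MAX_DEP_WEAK as 100 purely
   for illustration, weaknesses 60 and 50 merge to 30 when multiplying and to
   60 when taking the maximum.  */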
4354 /* Return the join of two dep_statuses DS1 and DS2.
4355 This function assumes that both DS1 and DS2 contain speculative bits. */
4356 ds_t
4357 ds_merge (ds_t ds1, ds_t ds2)
4359 return ds_merge_1 (ds1, ds2, false);
4362 /* Return the join of the dep_statuses DS and DS2; when both are speculative and MEM1 is given, MEM1 and MEM2 are used to estimate the weakness of the data speculation. */
4363 ds_t
4364 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4366 ds_t new_status = ds | ds2;
4368 if (new_status & SPECULATIVE)
4370 if ((ds && !(ds & SPECULATIVE))
4371 || (ds2 && !(ds2 & SPECULATIVE)))
4372 /* Then this dep can't be speculative. */
4373 new_status &= ~SPECULATIVE;
4374 else
4376 /* Both are speculative.  Merge the probabilities. */
4377 if (mem1)
4379 dw_t dw;
4381 dw = estimate_dep_weak (mem1, mem2);
4382 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4385 if (!ds)
4386 new_status = ds2;
4387 else if (!ds2)
4388 new_status = ds;
4389 else
4390 new_status = ds_merge (ds2, ds);
4394 return new_status;
4397 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
4398 probabilities. */
4399 ds_t
4400 ds_max_merge (ds_t ds1, ds_t ds2)
4402 if (ds1 == 0 && ds2 == 0)
4403 return 0;
4405 if (ds1 == 0 && ds2 != 0)
4406 return ds2;
4408 if (ds1 != 0 && ds2 == 0)
4409 return ds1;
4411 return ds_merge_1 (ds1, ds2, true);
4414 /* Return the probability of speculation success for the speculation
4415 status DS. */
4416 dw_t
4417 ds_weak (ds_t ds)
4419 ds_t res = 1, dt;
4420 int n = 0;
4422 dt = FIRST_SPEC_TYPE;
4425 if (ds & dt)
4427 res *= (ds_t) get_dep_weak (ds, dt);
4428 n++;
4431 if (dt == LAST_SPEC_TYPE)
4432 break;
4433 dt <<= SPEC_TYPE_SHIFT;
4435 while (1);
4437 gcc_assert (n);
4438 while (--n)
4439 res /= MAX_DEP_WEAK;
4441 if (res < MIN_DEP_WEAK)
4442 res = MIN_DEP_WEAK;
4444 gcc_assert (res <= MAX_DEP_WEAK);
4446 return (dw_t) res;
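/* For instance, if DS carries two speculative types with weaknesses w1 and
   w2, the result is w1 * w2 / MAX_DEP_WEAK (one division per extra type
   scales the product back into the [MIN_DEP_WEAK, MAX_DEP_WEAK] range).
   Treating MAX_DEP_WEAK as 100 purely for illustration, weaknesses 80 and 50
   give a combined success probability of 40.  */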
4449 /* Return a dep status that contains all speculation types of DS. */
4450 ds_t
4451 ds_get_speculation_types (ds_t ds)
4453 if (ds & BEGIN_DATA)
4454 ds |= BEGIN_DATA;
4455 if (ds & BE_IN_DATA)
4456 ds |= BE_IN_DATA;
4457 if (ds & BEGIN_CONTROL)
4458 ds |= BEGIN_CONTROL;
4459 if (ds & BE_IN_CONTROL)
4460 ds |= BE_IN_CONTROL;
4462 return ds & SPECULATIVE;
4465 /* Return a dep status that contains maximal weakness for each speculation
4466 type present in DS. */
4467 ds_t
4468 ds_get_max_dep_weak (ds_t ds)
4470 if (ds & BEGIN_DATA)
4471 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4472 if (ds & BE_IN_DATA)
4473 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4474 if (ds & BEGIN_CONTROL)
4475 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4476 if (ds & BE_IN_CONTROL)
4477 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4479 return ds;
4482 /* Dump information about the dependence status S. */
4483 static void
4484 dump_ds (FILE *f, ds_t s)
4486 fprintf (f, "{");
4488 if (s & BEGIN_DATA)
4489 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4490 if (s & BE_IN_DATA)
4491 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4492 if (s & BEGIN_CONTROL)
4493 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4494 if (s & BE_IN_CONTROL)
4495 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4497 if (s & HARD_DEP)
4498 fprintf (f, "HARD_DEP; ");
4500 if (s & DEP_TRUE)
4501 fprintf (f, "DEP_TRUE; ");
4502 if (s & DEP_OUTPUT)
4503 fprintf (f, "DEP_OUTPUT; ");
4504 if (s & DEP_ANTI)
4505 fprintf (f, "DEP_ANTI; ");
4506 if (s & DEP_CONTROL)
4507 fprintf (f, "DEP_CONTROL; ");
4509 fprintf (f, "}");
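/* For example, a true dependence that is data-speculative with BEGIN_DATA
   weakness 3 is printed as "{BEGIN_DATA: 3; DEP_TRUE; }".  */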
4512 DEBUG_FUNCTION void
4513 debug_ds (ds_t s)
4515 dump_ds (stderr, s);
4516 fprintf (stderr, "\n");
4519 /* Verify that dependence type and status are consistent.
4520 If RELAXED_P is true, then skip dep_weakness checks. */
4521 static void
4522 check_dep (dep_t dep, bool relaxed_p)
4524 enum reg_note dt = DEP_TYPE (dep);
4525 ds_t ds = DEP_STATUS (dep);
4527 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4529 if (!(current_sched_info->flags & USE_DEPS_LIST))
4531 gcc_assert (ds == 0);
4532 return;
4535 /* Check that dependence type contains the same bits as the status. */
4536 if (dt == REG_DEP_TRUE)
4537 gcc_assert (ds & DEP_TRUE);
4538 else if (dt == REG_DEP_OUTPUT)
4539 gcc_assert ((ds & DEP_OUTPUT)
4540 && !(ds & DEP_TRUE));
4541 else if (dt == REG_DEP_ANTI)
4542 gcc_assert ((ds & DEP_ANTI)
4543 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4544 else
4545 gcc_assert (dt == REG_DEP_CONTROL
4546 && (ds & DEP_CONTROL)
4547 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4549 /* HARD_DEP cannot appear in the dep_status of a link. */
4550 gcc_assert (!(ds & HARD_DEP));
4552 /* Check that dependence status is set correctly when speculation is not
4553 supported. */
4554 if (!sched_deps_info->generate_spec_deps)
4555 gcc_assert (!(ds & SPECULATIVE));
4556 else if (ds & SPECULATIVE)
4558 if (!relaxed_p)
4560 ds_t type = FIRST_SPEC_TYPE;
4562 /* Check that dependence weakness is in proper range. */
4565 if (ds & type)
4566 get_dep_weak (ds, type);
4568 if (type == LAST_SPEC_TYPE)
4569 break;
4570 type <<= SPEC_TYPE_SHIFT;
4572 while (1);
4575 if (ds & BEGIN_SPEC)
4577 /* Only true dependence can be data speculative. */
4578 if (ds & BEGIN_DATA)
4579 gcc_assert (ds & DEP_TRUE);
4581 /* Control dependencies in the insn scheduler are represented by
4582 anti-dependencies; therefore only an anti dependence can be
4583 control speculative. */
4584 if (ds & BEGIN_CONTROL)
4585 gcc_assert (ds & DEP_ANTI);
4587 else
4589 /* Subsequent speculations should resolve true dependencies. */
4590 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4593 /* Check that true and anti dependencies can't have other speculative
4594 statuses. */
4595 if (ds & DEP_TRUE)
4596 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4597 /* An output dependence can't be speculative at all. */
4598 gcc_assert (!(ds & DEP_OUTPUT));
4599 if (ds & DEP_ANTI)
4600 gcc_assert (ds & BEGIN_CONTROL);
4604 /* The following code discovers opportunities to switch a memory reference
4605 and an increment by modifying the address. We ensure that this is done
4606 only for dependencies that do nothing more than express a single register
4607 dependence (using DEP_NONREG and DEP_MULTIPLE), and that every memory
4608 instruction involved is subject to at most one dep that can cause a pattern
4609 change.
4611 When we discover a suitable dependency, we fill in the dep_replacement
4612 structure to show how to modify the memory reference. */
4614 /* Holds information about a pair of memory reference and register increment
4615 insns which depend on each other, but could possibly be interchanged. */
4616 struct mem_inc_info
4618 rtx_insn *inc_insn;
4619 rtx_insn *mem_insn;
4621 rtx *mem_loc;
4622 /* A register occurring in the memory address for which we wish to break
4623 the dependence. This must be identical to the destination register of
4624 the increment. */
4625 rtx mem_reg0;
4626 /* Any kind of index that is added to that register. */
4627 rtx mem_index;
4628 /* The constant offset used in the memory address. */
4629 HOST_WIDE_INT mem_constant;
4630 /* The constant added in the increment insn. Negated if the increment is
4631 after the memory address. */
4632 HOST_WIDE_INT inc_constant;
4633 /* The source register used in the increment. May be different from mem_reg0
4634 if the increment occurs before the memory address. */
4635 rtx inc_input;
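/* As an example of how these fields are filled in: for a memory insn using
   (mem (plus (reg R) (const_int 8))) followed by the increment
   (set (reg R) (plus (reg R) (const_int 4))), find_mem and parse_add_or_inc
   record mem_reg0 = inc_input = (reg R), mem_index = NULL_RTX,
   mem_constant = 8 and inc_constant = -4 (negated because the increment
   comes after the memory reference).  */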
4638 /* Verify that the memory location described in MII can be replaced with
4639 one using NEW_ADDR. Return the new memory reference or NULL_RTX. The
4640 insn remains unchanged by this function. */
4642 static rtx
4643 attempt_change (struct mem_inc_info *mii, rtx new_addr)
4645 rtx mem = *mii->mem_loc;
4646 rtx new_mem;
4648 /* Jump through a lot of hoops to keep the attributes up to date. We
4649 do not want to call one of the change address variants that take
4650 an offset even though we know the offset in many cases. These
4651 assume you are changing where the address is pointing by the
4652 offset. */
4653 new_mem = replace_equiv_address_nv (mem, new_addr);
4654 if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4656 if (sched_verbose >= 5)
4657 fprintf (sched_dump, "validation failure\n");
4658 return NULL_RTX;
4661 /* Put back the old one. */
4662 validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4664 return new_mem;
4667 /* Return true if INSN is of the form "a = b op c" where a and b are
4668 regs. op is + if c is a reg and +|- if c is a const. Fill in
4669 information in MII about what is found.
4670 BEFORE_MEM indicates whether the increment is found before or after
4671 a corresponding memory reference. */
4673 static bool
4674 parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
4676 rtx pat = single_set (insn);
4677 rtx src, cst;
4678 bool regs_equal;
4680 if (RTX_FRAME_RELATED_P (insn) || !pat)
4681 return false;
4683 /* Result must be single reg. */
4684 if (!REG_P (SET_DEST (pat)))
4685 return false;
4687 if (GET_CODE (SET_SRC (pat)) != PLUS)
4688 return false;
4690 mii->inc_insn = insn;
4691 src = SET_SRC (pat);
4692 mii->inc_input = XEXP (src, 0);
4694 if (!REG_P (XEXP (src, 0)))
4695 return false;
4697 if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4698 return false;
4700 cst = XEXP (src, 1);
4701 if (!CONST_INT_P (cst))
4702 return false;
4703 mii->inc_constant = INTVAL (cst);
4705 regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4707 if (!before_mem)
4709 mii->inc_constant = -mii->inc_constant;
4710 if (!regs_equal)
4711 return false;
4714 if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
4716 /* Note that the sign has already been reversed for !before_mem. */
4717 if (STACK_GROWS_DOWNWARD)
4718 return mii->inc_constant > 0;
4719 else
4720 return mii->inc_constant < 0;
4722 return true;
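/* In other words, the increment candidates accepted above have the shape
   (set (reg R) (plus (reg R') (const_int N))), where (reg R) must be
   MII->mem_reg0 and R' may differ from R only when BEFORE_MEM is true.
   A non-CONST_INT addend or a frame-related insn is rejected, and stack
   pointer adjustments are accepted only for one sign of the constant (see
   the STACK_GROWS_DOWNWARD check).  */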
4725 /* Once a suitable mem reference has been found and the corresponding data
4726 in MII has been filled in, this function is called to find a suitable
4727 add or inc insn involving the register we found in the memory
4728 reference. */
4730 static bool
4731 find_inc (struct mem_inc_info *mii, bool backwards)
4733 sd_iterator_def sd_it;
4734 dep_t dep;
4736 sd_it = sd_iterator_start (mii->mem_insn,
4737 backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4738 while (sd_iterator_cond (&sd_it, &dep))
4740 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
4741 rtx_insn *pro = DEP_PRO (dep);
4742 rtx_insn *con = DEP_CON (dep);
4743 rtx_insn *inc_cand = backwards ? pro : con;
4744 if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4745 goto next;
4746 if (parse_add_or_inc (mii, inc_cand, backwards))
4748 struct dep_replacement *desc;
4749 df_ref def;
4750 rtx newaddr, newmem;
4752 if (sched_verbose >= 5)
4753 fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4754 INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4756 /* Need to ensure that none of the operands of the inc
4757 instruction are assigned to by the mem insn. */
4758 FOR_EACH_INSN_DEF (def, mii->mem_insn)
4759 if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4760 || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4762 if (sched_verbose >= 5)
4763 fprintf (sched_dump,
4764 "inc conflicts with store failure.\n");
4765 goto next;
4768 newaddr = mii->inc_input;
4769 if (mii->mem_index != NULL_RTX)
4770 newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4771 mii->mem_index);
4772 newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4773 mii->mem_constant + mii->inc_constant);
4774 newmem = attempt_change (mii, newaddr);
4775 if (newmem == NULL_RTX)
4776 goto next;
4777 if (sched_verbose >= 5)
4778 fprintf (sched_dump, "successful address replacement\n");
4779 desc = XCNEW (struct dep_replacement);
4780 DEP_REPLACE (dep) = desc;
4781 desc->loc = mii->mem_loc;
4782 desc->newval = newmem;
4783 desc->orig = *desc->loc;
4784 desc->insn = mii->mem_insn;
4785 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4786 INSN_SPEC_BACK_DEPS (con));
4787 if (backwards)
4789 FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
4790 add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4791 REG_DEP_TRUE);
4793 else
4795 FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
4796 add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4797 REG_DEP_ANTI);
4799 return true;
4801 next:
4802 sd_iterator_next (&sd_it);
4804 return false;
4807 /* A recursive function that walks ADDRESS_OF_X to find memory references
4808 which could be modified during scheduling. We call find_inc for each
4809 one we find that has a recognizable form. MII holds information about
4810 the pair of memory/increment instructions.
4811 We ensure that every instruction with a memory reference (which will be
4812 the location of the replacement) is assigned at most one breakable
4813 dependency. */
4815 static bool
4816 find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4818 rtx x = *address_of_x;
4819 enum rtx_code code = GET_CODE (x);
4820 const char *const fmt = GET_RTX_FORMAT (code);
4821 int i;
4823 if (code == MEM)
4825 rtx reg0 = XEXP (x, 0);
4827 mii->mem_loc = address_of_x;
4828 mii->mem_index = NULL_RTX;
4829 mii->mem_constant = 0;
4830 if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4832 mii->mem_constant = INTVAL (XEXP (reg0, 1));
4833 reg0 = XEXP (reg0, 0);
4835 if (GET_CODE (reg0) == PLUS)
4837 mii->mem_index = XEXP (reg0, 1);
4838 reg0 = XEXP (reg0, 0);
4840 if (REG_P (reg0))
4842 df_ref use;
4843 int occurrences = 0;
4845 /* Make sure this reg appears only once in this insn. Can't use
4846 count_occurrences since that only works for pseudos. */
4847 FOR_EACH_INSN_USE (use, mii->mem_insn)
4848 if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
4849 if (++occurrences > 1)
4851 if (sched_verbose >= 5)
4852 fprintf (sched_dump, "mem count failure\n");
4853 return false;
4856 mii->mem_reg0 = reg0;
4857 return find_inc (mii, true) || find_inc (mii, false);
4859 return false;
4862 if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4864 /* If REG occurs inside a MEM used in a bit-field reference,
4865 that is unacceptable. */
4866 return false;
4869 /* Time for some deep diving. */
4870 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4872 if (fmt[i] == 'e')
4874 if (find_mem (mii, &XEXP (x, i)))
4875 return true;
4877 else if (fmt[i] == 'E')
4879 int j;
4880 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4881 if (find_mem (mii, &XVECEXP (x, i, j)))
4882 return true;
4885 return false;
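/* Example of the address decomposition performed above: for
   (mem:SI (plus (plus (reg 100) (reg 101)) (const_int 4))) the code records
   mem_loc pointing at the MEM, mem_constant = 4, mem_index = (reg 101) and
   mem_reg0 = (reg 100), and then tries find_inc in both directions.  Simpler
   forms such as (mem (reg 100)) leave mem_constant at 0 and mem_index as
   NULL_RTX.  */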
4889 /* Examine the instructions between HEAD and TAIL and try to find
4890 dependencies that can be broken by modifying one of the patterns. */
4892 void
4893 find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
4895 rtx_insn *insn, *next_tail = NEXT_INSN (tail);
4896 int success_in_block = 0;
4898 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4900 struct mem_inc_info mii;
4902 if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4903 continue;
4905 mii.mem_insn = insn;
4906 if (find_mem (&mii, &PATTERN (insn)))
4907 success_in_block++;
4909 if (success_in_block && sched_verbose >= 5)
4910 fprintf (sched_dump, "%d candidates for address modification found.\n",
4911 success_in_block);
4914 #endif /* INSN_SCHEDULING */