[dragonfly.git] / contrib / gcc-5.0 / gcc / sched-deps.c
1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992-2015 Free Software Foundation, Inc.
4 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
5 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "diagnostic-core.h"
28 #include "rtl.h"
29 #include "hash-set.h"
30 #include "machmode.h"
31 #include "vec.h"
32 #include "double-int.h"
33 #include "input.h"
34 #include "alias.h"
35 #include "symtab.h"
36 #include "wide-int.h"
37 #include "inchash.h"
38 #include "tree.h" /* FIXME: Used by call_may_noreturn_p. */
39 #include "tm_p.h"
40 #include "hard-reg-set.h"
41 #include "regs.h"
42 #include "input.h"
43 #include "function.h"
44 #include "flags.h"
45 #include "insn-config.h"
46 #include "insn-attr.h"
47 #include "except.h"
48 #include "recog.h"
49 #include "emit-rtl.h"
50 #include "dominance.h"
51 #include "cfg.h"
52 #include "cfgbuild.h"
53 #include "predict.h"
54 #include "basic-block.h"
55 #include "sched-int.h"
56 #include "params.h"
57 #include "cselib.h"
58 #include "ira.h"
59 #include "ira-int.h"
60 #include "target.h"
62 #ifdef INSN_SCHEDULING
64 #ifdef ENABLE_CHECKING
65 #define CHECK (true)
66 #else
67 #define CHECK (false)
68 #endif
70 /* Holds current parameters for the dependency analyzer. */
71 struct sched_deps_info_def *sched_deps_info;
73 /* The data is specific to the Haifa scheduler. */
74 vec<haifa_deps_insn_data_def>
75 h_d_i_d = vNULL;
77 /* Return the major type present in the DS. */
78 enum reg_note
79 ds_to_dk (ds_t ds)
81 if (ds & DEP_TRUE)
82 return REG_DEP_TRUE;
84 if (ds & DEP_OUTPUT)
85 return REG_DEP_OUTPUT;
87 if (ds & DEP_CONTROL)
88 return REG_DEP_CONTROL;
90 gcc_assert (ds & DEP_ANTI);
92 return REG_DEP_ANTI;
95 /* Return equivalent dep_status. */
96 ds_t
97 dk_to_ds (enum reg_note dk)
99 switch (dk)
101 case REG_DEP_TRUE:
102 return DEP_TRUE;
104 case REG_DEP_OUTPUT:
105 return DEP_OUTPUT;
107 case REG_DEP_CONTROL:
108 return DEP_CONTROL;
110 default:
111 gcc_assert (dk == REG_DEP_ANTI);
112 return DEP_ANTI;
116 /* Functions to operate with dependence information container - dep_t. */
118 /* Init DEP with the arguments. */
119 void
120 init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
122 DEP_PRO (dep) = pro;
123 DEP_CON (dep) = con;
124 DEP_TYPE (dep) = type;
125 DEP_STATUS (dep) = ds;
126 DEP_COST (dep) = UNKNOWN_DEP_COST;
127 DEP_NONREG (dep) = 0;
128 DEP_MULTIPLE (dep) = 0;
129 DEP_REPLACE (dep) = NULL;
132 /* Init DEP with the arguments.
133 While most of the scheduler (including targets) only need the major type
134 of the dependency, it is convenient to hide full dep_status from them. */
135 void
136 init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
138 ds_t ds;
140 if ((current_sched_info->flags & USE_DEPS_LIST))
141 ds = dk_to_ds (kind);
142 else
143 ds = 0;
145 init_dep_1 (dep, pro, con, kind, ds);
148 /* Make a copy of FROM in TO. */
149 static void
150 copy_dep (dep_t to, dep_t from)
152 memcpy (to, from, sizeof (*to));
155 static void dump_ds (FILE *, ds_t);
157 /* Define flags for dump_dep (). */
159 /* Dump producer of the dependence. */
160 #define DUMP_DEP_PRO (2)
162 /* Dump consumer of the dependence. */
163 #define DUMP_DEP_CON (4)
165 /* Dump type of the dependence. */
166 #define DUMP_DEP_TYPE (8)
168 /* Dump status of the dependence. */
169 #define DUMP_DEP_STATUS (16)
171 /* Dump all information about the dependence. */
172 #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
173 |DUMP_DEP_STATUS)
175 /* Dump DEP to DUMP.
176 FLAGS is a bit mask specifying what information about DEP needs
177 to be printed.
178 If FLAGS has the very first bit set, then dump all information about DEP
179 and propagate this bit into the callee dump functions. */
180 static void
181 dump_dep (FILE *dump, dep_t dep, int flags)
183 if (flags & 1)
184 flags |= DUMP_DEP_ALL;
186 fprintf (dump, "<");
188 if (flags & DUMP_DEP_PRO)
189 fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
191 if (flags & DUMP_DEP_CON)
192 fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
194 if (flags & DUMP_DEP_TYPE)
196 char t;
197 enum reg_note type = DEP_TYPE (dep);
199 switch (type)
201 case REG_DEP_TRUE:
202 t = 't';
203 break;
205 case REG_DEP_OUTPUT:
206 t = 'o';
207 break;
209 case REG_DEP_CONTROL:
210 t = 'c';
211 break;
213 case REG_DEP_ANTI:
214 t = 'a';
215 break;
217 default:
218 gcc_unreachable ();
219 break;
222 fprintf (dump, "%c; ", t);
225 if (flags & DUMP_DEP_STATUS)
227 if (current_sched_info->flags & USE_DEPS_LIST)
228 dump_ds (dump, DEP_STATUS (dep));
231 fprintf (dump, ">");
234 /* Default flags for dump_dep (). */
235 static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
237 /* Dump all fields of DEP to STDERR. */
238 void
239 sd_debug_dep (dep_t dep)
241 dump_dep (stderr, dep, 1);
242 fprintf (stderr, "\n");
245 /* Determine whether DEP is a dependency link of a non-debug insn on a
246 debug insn. */
248 static inline bool
249 depl_on_debug_p (dep_link_t dep)
251 return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
252 && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
255 /* Functions to operate with a single link from the dependencies lists -
256 dep_link_t. */
258 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
259 PREV_NEXT_P. */
260 static void
261 attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
263 dep_link_t next = *prev_nextp;
265 gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
266 && DEP_LINK_NEXT (l) == NULL);
268 /* Init node being inserted. */
269 DEP_LINK_PREV_NEXTP (l) = prev_nextp;
270 DEP_LINK_NEXT (l) = next;
272 /* Fix next node. */
273 if (next != NULL)
275 gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
277 DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
280 /* Fix prev node. */
281 *prev_nextp = l;
284 /* Add dep_link LINK to deps_list L. */
285 static void
286 add_to_deps_list (dep_link_t link, deps_list_t l)
288 attach_dep_link (link, &DEPS_LIST_FIRST (l));
290 /* Don't count debug deps. */
291 if (!depl_on_debug_p (link))
292 ++DEPS_LIST_N_LINKS (l);
295 /* Detach dep_link L from the list. */
296 static void
297 detach_dep_link (dep_link_t l)
299 dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
300 dep_link_t next = DEP_LINK_NEXT (l);
302 *prev_nextp = next;
304 if (next != NULL)
305 DEP_LINK_PREV_NEXTP (next) = prev_nextp;
307 DEP_LINK_PREV_NEXTP (l) = NULL;
308 DEP_LINK_NEXT (l) = NULL;
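/* Illustrative note: a deps_list is a singly linked chain of dep_links in
   which every link also stores DEP_LINK_PREV_NEXTP, a pointer to the field
   that points at it (either DEPS_LIST_FIRST of the list or DEP_LINK_NEXT of
   the previous link).  That is why detach_dep_link above can unlink a node
   in O(1) without knowing the list head, essentially doing
     *DEP_LINK_PREV_NEXTP (l) = DEP_LINK_NEXT (l);  */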
311 /* Remove link LINK from list LIST. */
312 static void
313 remove_from_deps_list (dep_link_t link, deps_list_t list)
315 detach_dep_link (link);
317 /* Don't count debug deps. */
318 if (!depl_on_debug_p (link))
319 --DEPS_LIST_N_LINKS (list);
322 /* Move link LINK from list FROM to list TO. */
323 static void
324 move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
326 remove_from_deps_list (link, from);
327 add_to_deps_list (link, to);
330 /* Return true if LINK is not attached to any list. */
331 static bool
332 dep_link_is_detached_p (dep_link_t link)
334 return DEP_LINK_PREV_NEXTP (link) == NULL;
337 /* Pool to hold all dependency nodes (dep_node_t). */
338 static alloc_pool dn_pool;
340 /* Number of dep_nodes out there. */
341 static int dn_pool_diff = 0;
343 /* Create a dep_node. */
344 static dep_node_t
345 create_dep_node (void)
347 dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
348 dep_link_t back = DEP_NODE_BACK (n);
349 dep_link_t forw = DEP_NODE_FORW (n);
351 DEP_LINK_NODE (back) = n;
352 DEP_LINK_NEXT (back) = NULL;
353 DEP_LINK_PREV_NEXTP (back) = NULL;
355 DEP_LINK_NODE (forw) = n;
356 DEP_LINK_NEXT (forw) = NULL;
357 DEP_LINK_PREV_NEXTP (forw) = NULL;
359 ++dn_pool_diff;
361 return n;
364 /* Delete dep_node N. N must not be connected to any deps_list. */
365 static void
366 delete_dep_node (dep_node_t n)
368 gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
369 && dep_link_is_detached_p (DEP_NODE_FORW (n)));
371 XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));
373 --dn_pool_diff;
375 pool_free (dn_pool, n);
378 /* Pool to hold dependencies lists (deps_list_t). */
379 static alloc_pool dl_pool;
381 /* Number of deps_lists out there. */
382 static int dl_pool_diff = 0;
384 /* Functions to operate with dependences lists - deps_list_t. */
386 /* Return true if list L is empty. */
387 static bool
388 deps_list_empty_p (deps_list_t l)
390 return DEPS_LIST_N_LINKS (l) == 0;
393 /* Create a new deps_list. */
394 static deps_list_t
395 create_deps_list (void)
397 deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
399 DEPS_LIST_FIRST (l) = NULL;
400 DEPS_LIST_N_LINKS (l) = 0;
402 ++dl_pool_diff;
403 return l;
406 /* Free deps_list L. */
407 static void
408 free_deps_list (deps_list_t l)
410 gcc_assert (deps_list_empty_p (l));
412 --dl_pool_diff;
414 pool_free (dl_pool, l);
417 /* Return true if there are no dep_nodes or deps_lists out there.
418 After the region is scheduled all the dependency nodes and lists
419 should [generally] be returned to the pool. */
420 bool
421 deps_pools_are_empty_p (void)
423 return dn_pool_diff == 0 && dl_pool_diff == 0;
426 /* Remove all elements from L. */
427 static void
428 clear_deps_list (deps_list_t l)
432 dep_link_t link = DEPS_LIST_FIRST (l);
434 if (link == NULL)
435 break;
437 remove_from_deps_list (link, l);
439 while (1);
442 /* Decide whether a dependency should be treated as a hard or a speculative
443 dependency. */
444 static bool
445 dep_spec_p (dep_t dep)
447 if (current_sched_info->flags & DO_SPECULATION)
449 if (DEP_STATUS (dep) & SPECULATIVE)
450 return true;
452 if (current_sched_info->flags & DO_PREDICATION)
454 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
455 return true;
457 if (DEP_REPLACE (dep) != NULL)
458 return true;
459 return false;
462 static regset reg_pending_sets;
463 static regset reg_pending_clobbers;
464 static regset reg_pending_uses;
465 static regset reg_pending_control_uses;
466 static enum reg_pending_barrier_mode reg_pending_barrier;
468 /* Hard registers implicitly clobbered or used (or that may be implicitly
469 clobbered or used) by the currently analyzed insn. For example, an
470 insn may have an operand constraint whose register class contains a
471 single hard register. Even if that hard register does not appear in the
472 insn now, it will be in the insn after the reload pass because the
473 constraint requires it. */
474 static HARD_REG_SET implicit_reg_pending_clobbers;
475 static HARD_REG_SET implicit_reg_pending_uses;
477 /* To speed up the test for duplicate dependency links we keep a
478 record of dependencies created by add_dependence when the average
479 number of instructions in a basic block is very large.
481 Studies have shown that there are typically around 5 instructions between
482 branches in typical C code. So we can guess that the average
483 basic block is approximately 5 instructions long; we will choose 100X
484 the average size as a very large basic block.
486 Each insn has associated bitmaps for its dependencies. Each bitmap
487 has enough entries to represent a dependency on any other insn in
488 the insn chain. If the bitmap for the true-dependency cache is
489 allocated, then the remaining caches are allocated as well. */
490 static bitmap_head *true_dependency_cache = NULL;
491 static bitmap_head *output_dependency_cache = NULL;
492 static bitmap_head *anti_dependency_cache = NULL;
493 static bitmap_head *control_dependency_cache = NULL;
494 static bitmap_head *spec_dependency_cache = NULL;
495 static int cache_size;
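/* Illustrative note on how the caches are indexed: with
   P = INSN_LUID (producer) and C = INSN_LUID (consumer), a recorded true
   dependence of C on P corresponds to
     bitmap_bit_p (&true_dependency_cache[C], P)
   being set; the other caches are indexed the same way (see
   ask_dependency_caches and set_dependency_caches below).  */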
497 /* True if we should mark added dependencies as non-register deps. */
498 static bool mark_as_hard;
500 static int deps_may_trap_p (const_rtx);
501 static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
502 static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
503 enum reg_note, bool);
504 static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
505 rtx_insn_list **, int, enum reg_note,
506 bool);
507 static void delete_all_dependences (rtx);
508 static void chain_to_prev_insn (rtx_insn *);
510 static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
511 static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
512 static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
513 static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);
515 static bool sched_has_condition_p (const rtx_insn *);
516 static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
518 static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
519 rtx, rtx);
520 static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
522 #ifdef ENABLE_CHECKING
523 static void check_dep (dep_t, bool);
524 #endif
526 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
528 static int
529 deps_may_trap_p (const_rtx mem)
531 const_rtx addr = XEXP (mem, 0);
533 if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
535 const_rtx t = get_reg_known_value (REGNO (addr));
536 if (t)
537 addr = t;
539 return rtx_addr_can_trap_p (addr);
543 /* Find the condition under which INSN is executed. If REV is not NULL,
544 it is set to TRUE when the returned comparison should be reversed
545 to get the actual condition. */
546 static rtx
547 sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
549 rtx pat = PATTERN (insn);
550 rtx src;
552 if (rev)
553 *rev = false;
555 if (GET_CODE (pat) == COND_EXEC)
556 return COND_EXEC_TEST (pat);
558 if (!any_condjump_p (insn) || !onlyjump_p (insn))
559 return 0;
561 src = SET_SRC (pc_set (insn));
563 if (XEXP (src, 2) == pc_rtx)
564 return XEXP (src, 0);
565 else if (XEXP (src, 1) == pc_rtx)
567 rtx cond = XEXP (src, 0);
568 enum rtx_code revcode = reversed_comparison_code (cond, insn);
570 if (revcode == UNKNOWN)
571 return 0;
573 if (rev)
574 *rev = true;
575 return cond;
578 return 0;
581 /* Return the condition under which INSN does not execute (i.e. the
582 not-taken condition for a conditional branch), or NULL if we cannot
583 find such a condition. The caller should make a copy of the condition
584 before using it. */
585 rtx
586 sched_get_reverse_condition_uncached (const rtx_insn *insn)
588 bool rev;
589 rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
590 if (cond == NULL_RTX)
591 return cond;
592 if (!rev)
594 enum rtx_code revcode = reversed_comparison_code (cond, insn);
595 cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
596 XEXP (cond, 0),
597 XEXP (cond, 1));
599 return cond;
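/* For example, for a conditional branch taken when (eq (reg) (const_int 0)),
   this returns (ne (reg) (const_int 0)), i.e. the condition under which the
   fall-through path executes.  */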
602 /* Caching variant of sched_get_condition_with_rev_uncached.
603 We only do actual work the first time we come here for an insn; the
604 results are cached in INSN_CACHED_COND and INSN_REVERSE_COND. */
605 static rtx
606 sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
608 bool tmp;
610 if (INSN_LUID (insn) == 0)
611 return sched_get_condition_with_rev_uncached (insn, rev);
613 if (INSN_CACHED_COND (insn) == const_true_rtx)
614 return NULL_RTX;
616 if (INSN_CACHED_COND (insn) != NULL_RTX)
618 if (rev)
619 *rev = INSN_REVERSE_COND (insn);
620 return INSN_CACHED_COND (insn);
623 INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
624 INSN_REVERSE_COND (insn) = tmp;
626 if (INSN_CACHED_COND (insn) == NULL_RTX)
628 INSN_CACHED_COND (insn) = const_true_rtx;
629 return NULL_RTX;
632 if (rev)
633 *rev = INSN_REVERSE_COND (insn);
634 return INSN_CACHED_COND (insn);
637 /* True when we can find a condition under which INSN is executed. */
638 static bool
639 sched_has_condition_p (const rtx_insn *insn)
641 return !! sched_get_condition_with_rev (insn, NULL);
646 /* Return nonzero if conditions COND1 and COND2 can never be both true. */
647 static int
648 conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
650 if (COMPARISON_P (cond1)
651 && COMPARISON_P (cond2)
652 && GET_CODE (cond1) ==
653 (rev1==rev2
654 ? reversed_comparison_code (cond2, NULL)
655 : GET_CODE (cond2))
656 && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
657 && XEXP (cond1, 1) == XEXP (cond2, 1))
658 return 1;
659 return 0;
662 /* Return true if insn1 and insn2 can never depend on one another because
663 the conditions under which they are executed are mutually exclusive. */
664 bool
665 sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
667 rtx cond1, cond2;
668 bool rev1 = false, rev2 = false;
670 /* df doesn't handle conditional lifetimes entirely correctly;
671 calls mess up the conditional lifetimes. */
672 if (!CALL_P (insn1) && !CALL_P (insn2))
674 cond1 = sched_get_condition_with_rev (insn1, &rev1);
675 cond2 = sched_get_condition_with_rev (insn2, &rev2);
676 if (cond1 && cond2
677 && conditions_mutex_p (cond1, cond2, rev1, rev2)
678 /* Make sure first instruction doesn't affect condition of second
679 instruction if switched. */
680 && !modified_in_p (cond1, insn2)
681 /* Make sure second instruction doesn't affect condition of first
682 instruction if switched. */
683 && !modified_in_p (cond2, insn1))
684 return true;
686 return false;
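/* For example, two COND_EXEC insns predicated on (eq (reg) (const_int 0))
   and (ne (reg) (const_int 0)) can never execute together, so no dependence
   needs to exist between them, provided neither insn modifies the condition,
   as checked above.  */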
690 /* Return true if INSN can potentially be speculated with type DS. */
691 bool
692 sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
694 if (HAS_INTERNAL_DEP (insn))
695 return false;
697 if (!NONJUMP_INSN_P (insn))
698 return false;
700 if (SCHED_GROUP_P (insn))
701 return false;
703 if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
704 return false;
706 if (side_effects_p (PATTERN (insn)))
707 return false;
709 if (ds & BE_IN_SPEC)
710 /* The following instructions, which depend on a speculatively scheduled
711 instruction, cannot be speculatively scheduled along with it. */
713 if (may_trap_or_fault_p (PATTERN (insn)))
714 /* If instruction might fault, it cannot be speculatively scheduled.
715 For control speculation it's obvious why and for data speculation
716 it's because the insn might get wrong input if speculation
717 wasn't successful. */
718 return false;
720 if ((ds & BE_IN_DATA)
721 && sched_has_condition_p (insn))
722 /* If this is a predicated instruction, then it cannot be
723 speculatively scheduled. See PR35659. */
724 return false;
727 return true;
730 /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
731 initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
732 and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
733 This function is used to switch sd_iterator to the next list.
734 !!! For internal use only. Might consider moving it to sched-int.h. */
735 void
736 sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
737 deps_list_t *list_ptr, bool *resolved_p_ptr)
739 sd_list_types_def types = *types_ptr;
741 if (types & SD_LIST_HARD_BACK)
743 *list_ptr = INSN_HARD_BACK_DEPS (insn);
744 *resolved_p_ptr = false;
745 *types_ptr = types & ~SD_LIST_HARD_BACK;
747 else if (types & SD_LIST_SPEC_BACK)
749 *list_ptr = INSN_SPEC_BACK_DEPS (insn);
750 *resolved_p_ptr = false;
751 *types_ptr = types & ~SD_LIST_SPEC_BACK;
753 else if (types & SD_LIST_FORW)
755 *list_ptr = INSN_FORW_DEPS (insn);
756 *resolved_p_ptr = false;
757 *types_ptr = types & ~SD_LIST_FORW;
759 else if (types & SD_LIST_RES_BACK)
761 *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
762 *resolved_p_ptr = true;
763 *types_ptr = types & ~SD_LIST_RES_BACK;
765 else if (types & SD_LIST_RES_FORW)
767 *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
768 *resolved_p_ptr = true;
769 *types_ptr = types & ~SD_LIST_RES_FORW;
771 else
773 *list_ptr = NULL;
774 *resolved_p_ptr = false;
775 *types_ptr = SD_LIST_NONE;
779 /* Return the total size of INSN's lists defined by LIST_TYPES. */
780 int
781 sd_lists_size (const_rtx insn, sd_list_types_def list_types)
783 int size = 0;
785 while (list_types != SD_LIST_NONE)
787 deps_list_t list;
788 bool resolved_p;
790 sd_next_list (insn, &list_types, &list, &resolved_p);
791 if (list)
792 size += DEPS_LIST_N_LINKS (list);
795 return size;
798 /* Return true if INSN's lists defined by LIST_TYPES are all empty. */
800 bool
801 sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
803 while (list_types != SD_LIST_NONE)
805 deps_list_t list;
806 bool resolved_p;
808 sd_next_list (insn, &list_types, &list, &resolved_p);
809 if (!deps_list_empty_p (list))
810 return false;
813 return true;
816 /* Initialize data for INSN. */
817 void
818 sd_init_insn (rtx insn)
820 INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
821 INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
822 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
823 INSN_FORW_DEPS (insn) = create_deps_list ();
824 INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
826 /* ??? It would be nice to allocate dependency caches here. */
829 /* Free data for INSN. */
830 void
831 sd_finish_insn (rtx insn)
833 /* ??? It would be nice to deallocate dependency caches here. */
835 free_deps_list (INSN_HARD_BACK_DEPS (insn));
836 INSN_HARD_BACK_DEPS (insn) = NULL;
838 free_deps_list (INSN_SPEC_BACK_DEPS (insn));
839 INSN_SPEC_BACK_DEPS (insn) = NULL;
841 free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
842 INSN_RESOLVED_BACK_DEPS (insn) = NULL;
844 free_deps_list (INSN_FORW_DEPS (insn));
845 INSN_FORW_DEPS (insn) = NULL;
847 free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
848 INSN_RESOLVED_FORW_DEPS (insn) = NULL;
851 /* Find a dependency between producer PRO and consumer CON.
852 Search through resolved dependency lists if RESOLVED_P is true.
853 If no such dependency is found return NULL,
854 otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
855 with an iterator pointing to it. */
856 static dep_t
857 sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
858 sd_iterator_def *sd_it_ptr)
860 sd_list_types_def pro_list_type;
861 sd_list_types_def con_list_type;
862 sd_iterator_def sd_it;
863 dep_t dep;
864 bool found_p = false;
866 if (resolved_p)
868 pro_list_type = SD_LIST_RES_FORW;
869 con_list_type = SD_LIST_RES_BACK;
871 else
873 pro_list_type = SD_LIST_FORW;
874 con_list_type = SD_LIST_BACK;
877 /* Walk through either back list of INSN or forw list of ELEM
878 depending on which one is shorter. */
879 if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
881 /* Find the dep_link with producer PRO in consumer's back_deps. */
882 FOR_EACH_DEP (con, con_list_type, sd_it, dep)
883 if (DEP_PRO (dep) == pro)
885 found_p = true;
886 break;
889 else
891 /* Find the dep_link with consumer CON in producer's forw_deps. */
892 FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
893 if (DEP_CON (dep) == con)
895 found_p = true;
896 break;
900 if (found_p)
902 if (sd_it_ptr != NULL)
903 *sd_it_ptr = sd_it;
905 return dep;
908 return NULL;
911 /* Find a dependency between producer PRO and consumer CON.
912 Use the dependency caches [if available] to check whether the dependency is present at all.
913 Search through resolved dependency lists if RESOLVED_P is true.
914 Return the dependency, or NULL if none is found. */
915 dep_t
916 sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
918 if (true_dependency_cache != NULL)
919 /* Avoiding the list walk below can cut compile times dramatically
920 for some code. */
922 int elem_luid = INSN_LUID (pro);
923 int insn_luid = INSN_LUID (con);
925 if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
926 && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
927 && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
928 && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
929 return NULL;
932 return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
935 /* Add or update a dependence described by DEP.
936 MEM1 and MEM2, if non-null, correspond to memory locations in case of
937 data speculation.
939 The function returns a value indicating if an old entry has been changed
940 or a new entry has been added to insn's backward deps.
942 This function merely checks whether producer and consumer are the same insn
943 and doesn't create a dep in this case. Actual manipulation of
944 dependence data structures is performed in add_or_update_dep_1. */
945 static enum DEPS_ADJUST_RESULT
946 maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
948 rtx_insn *elem = DEP_PRO (dep);
949 rtx_insn *insn = DEP_CON (dep);
951 gcc_assert (INSN_P (insn) && INSN_P (elem));
953 /* Don't depend an insn on itself. */
954 if (insn == elem)
956 if (sched_deps_info->generate_spec_deps)
957 /* INSN has an internal dependence, which we can't overcome. */
958 HAS_INTERNAL_DEP (insn) = 1;
960 return DEP_NODEP;
963 return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
966 /* Ask dependency caches what needs to be done for dependence DEP.
967 Return DEP_CREATED if new dependence should be created and there is no
968 need to try to find one searching the dependencies lists.
969 Return DEP_PRESENT if there already is a dependence described by DEP and
970 hence nothing is to be done.
971 Return DEP_CHANGED if there already is a dependence, but it should be
972 updated to incorporate additional information from DEP. */
973 static enum DEPS_ADJUST_RESULT
974 ask_dependency_caches (dep_t dep)
976 int elem_luid = INSN_LUID (DEP_PRO (dep));
977 int insn_luid = INSN_LUID (DEP_CON (dep));
979 gcc_assert (true_dependency_cache != NULL
980 && output_dependency_cache != NULL
981 && anti_dependency_cache != NULL
982 && control_dependency_cache != NULL);
984 if (!(current_sched_info->flags & USE_DEPS_LIST))
986 enum reg_note present_dep_type;
988 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
989 present_dep_type = REG_DEP_TRUE;
990 else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
991 present_dep_type = REG_DEP_OUTPUT;
992 else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
993 present_dep_type = REG_DEP_ANTI;
994 else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
995 present_dep_type = REG_DEP_CONTROL;
996 else
997 /* There is no existing dep so it should be created. */
998 return DEP_CREATED;
1000 if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
1001 /* DEP does not add anything to the existing dependence. */
1002 return DEP_PRESENT;
1004 else
1006 ds_t present_dep_types = 0;
1008 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
1009 present_dep_types |= DEP_TRUE;
1010 if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
1011 present_dep_types |= DEP_OUTPUT;
1012 if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
1013 present_dep_types |= DEP_ANTI;
1014 if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
1015 present_dep_types |= DEP_CONTROL;
1017 if (present_dep_types == 0)
1018 /* There is no existing dep so it should be created. */
1019 return DEP_CREATED;
1021 if (!(current_sched_info->flags & DO_SPECULATION)
1022 || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
1024 if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
1025 == present_dep_types)
1026 /* DEP does not add anything to the existing dependence. */
1027 return DEP_PRESENT;
1029 else
1031 /* Only true dependencies can be data speculative and
1032 only anti dependencies can be control speculative. */
1033 gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
1034 == present_dep_types);
1036 /* if (DEP is SPECULATIVE) then
1037 ..we should update DEP_STATUS
1038 else
1039 ..we should reset existing dep to non-speculative. */
1043 return DEP_CHANGED;
1046 /* Set dependency caches according to DEP. */
1047 static void
1048 set_dependency_caches (dep_t dep)
1050 int elem_luid = INSN_LUID (DEP_PRO (dep));
1051 int insn_luid = INSN_LUID (DEP_CON (dep));
1053 if (!(current_sched_info->flags & USE_DEPS_LIST))
1055 switch (DEP_TYPE (dep))
1057 case REG_DEP_TRUE:
1058 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1059 break;
1061 case REG_DEP_OUTPUT:
1062 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1063 break;
1065 case REG_DEP_ANTI:
1066 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1067 break;
1069 case REG_DEP_CONTROL:
1070 bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1071 break;
1073 default:
1074 gcc_unreachable ();
1077 else
1079 ds_t ds = DEP_STATUS (dep);
1081 if (ds & DEP_TRUE)
1082 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1083 if (ds & DEP_OUTPUT)
1084 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1085 if (ds & DEP_ANTI)
1086 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1087 if (ds & DEP_CONTROL)
1088 bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1090 if (ds & SPECULATIVE)
1092 gcc_assert (current_sched_info->flags & DO_SPECULATION);
1093 bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
1098 /* The type of dependence DEP has changed from OLD_TYPE. Update dependency
1099 caches accordingly. */
1100 static void
1101 update_dependency_caches (dep_t dep, enum reg_note old_type)
1103 int elem_luid = INSN_LUID (DEP_PRO (dep));
1104 int insn_luid = INSN_LUID (DEP_CON (dep));
1106 /* Clear corresponding cache entry because type of the link
1107 may have changed. Keep them if we use_deps_list. */
1108 if (!(current_sched_info->flags & USE_DEPS_LIST))
1110 switch (old_type)
1112 case REG_DEP_OUTPUT:
1113 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1114 break;
1116 case REG_DEP_ANTI:
1117 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1118 break;
1120 case REG_DEP_CONTROL:
1121 bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1122 break;
1124 default:
1125 gcc_unreachable ();
1129 set_dependency_caches (dep);
1132 /* Convert a dependence pointed to by SD_IT to be non-speculative. */
1133 static void
1134 change_spec_dep_to_hard (sd_iterator_def sd_it)
1136 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1137 dep_link_t link = DEP_NODE_BACK (node);
1138 dep_t dep = DEP_NODE_DEP (node);
1139 rtx_insn *elem = DEP_PRO (dep);
1140 rtx_insn *insn = DEP_CON (dep);
1142 move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1144 DEP_STATUS (dep) &= ~SPECULATIVE;
1146 if (true_dependency_cache != NULL)
1147 /* Clear the cache entry. */
1148 bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1149 INSN_LUID (elem));
1152 /* Update DEP to incorporate information from NEW_DEP.
1153 SD_IT points to DEP in case it should be moved to another list.
1154 MEM1 and MEM2, if nonnull, correspond to memory locations in case a
1155 data-speculative dependence should be updated. */
1156 static enum DEPS_ADJUST_RESULT
1157 update_dep (dep_t dep, dep_t new_dep,
1158 sd_iterator_def sd_it ATTRIBUTE_UNUSED,
1159 rtx mem1 ATTRIBUTE_UNUSED,
1160 rtx mem2 ATTRIBUTE_UNUSED)
1162 enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1163 enum reg_note old_type = DEP_TYPE (dep);
1164 bool was_spec = dep_spec_p (dep);
1166 DEP_NONREG (dep) |= DEP_NONREG (new_dep);
1167 DEP_MULTIPLE (dep) = 1;
1169 /* If this is a more restrictive type of dependence than the
1170 existing one, then change the existing dependence to this
1171 type. */
1172 if ((int) DEP_TYPE (new_dep) < (int) old_type)
1174 DEP_TYPE (dep) = DEP_TYPE (new_dep);
1175 res = DEP_CHANGED;
1178 if (current_sched_info->flags & USE_DEPS_LIST)
1179 /* Update DEP_STATUS. */
1181 ds_t dep_status = DEP_STATUS (dep);
1182 ds_t ds = DEP_STATUS (new_dep);
1183 ds_t new_status = ds | dep_status;
1185 if (new_status & SPECULATIVE)
1187 /* Either existing dep or a dep we're adding or both are
1188 speculative. */
1189 if (!(ds & SPECULATIVE)
1190 || !(dep_status & SPECULATIVE))
1191 /* The new dep can't be speculative. */
1192 new_status &= ~SPECULATIVE;
1193 else
1195 /* Both are speculative. Merge probabilities. */
1196 if (mem1 != NULL)
1198 dw_t dw;
1200 dw = estimate_dep_weak (mem1, mem2);
1201 ds = set_dep_weak (ds, BEGIN_DATA, dw);
1204 new_status = ds_merge (dep_status, ds);
1208 ds = new_status;
1210 if (dep_status != ds)
1212 DEP_STATUS (dep) = ds;
1213 res = DEP_CHANGED;
1217 if (was_spec && !dep_spec_p (dep))
1218 /* The old dep was speculative, but now it isn't. */
1219 change_spec_dep_to_hard (sd_it);
1221 if (true_dependency_cache != NULL
1222 && res == DEP_CHANGED)
1223 update_dependency_caches (dep, old_type);
1225 return res;
1228 /* Add or update a dependence described by DEP.
1229 MEM1 and MEM2, if non-null, correspond to memory locations in case of
1230 data speculation.
1232 The function returns a value indicating if an old entry has been changed
1233 or a new entry has been added to insn's backward deps or nothing has
1234 been updated at all. */
1235 static enum DEPS_ADJUST_RESULT
1236 add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1237 rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1239 bool maybe_present_p = true;
1240 bool present_p = false;
1242 gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1243 && DEP_PRO (new_dep) != DEP_CON (new_dep));
1245 #ifdef ENABLE_CHECKING
1246 check_dep (new_dep, mem1 != NULL);
1247 #endif
1249 if (true_dependency_cache != NULL)
1251 switch (ask_dependency_caches (new_dep))
1253 case DEP_PRESENT:
1254 dep_t present_dep;
1255 sd_iterator_def sd_it;
1257 present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1258 DEP_CON (new_dep),
1259 resolved_p, &sd_it);
1260 DEP_MULTIPLE (present_dep) = 1;
1261 return DEP_PRESENT;
1263 case DEP_CHANGED:
1264 maybe_present_p = true;
1265 present_p = true;
1266 break;
1268 case DEP_CREATED:
1269 maybe_present_p = false;
1270 present_p = false;
1271 break;
1273 default:
1274 gcc_unreachable ();
1275 break;
1279 /* Check that we don't already have this dependence. */
1280 if (maybe_present_p)
1282 dep_t present_dep;
1283 sd_iterator_def sd_it;
1285 gcc_assert (true_dependency_cache == NULL || present_p);
1287 present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1288 DEP_CON (new_dep),
1289 resolved_p, &sd_it);
1291 if (present_dep != NULL)
1292 /* We found an existing dependency between ELEM and INSN. */
1293 return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1294 else
1295 /* We didn't find a dep, so it shouldn't be present in the cache. */
1296 gcc_assert (!present_p);
1299 /* Might want to check one level of transitivity to save conses.
1300 This check should be done in maybe_add_or_update_dep_1.
1301 Since we made it to add_or_update_dep_1, we must create
1302 (or update) a link. */
1304 if (mem1 != NULL_RTX)
1306 gcc_assert (sched_deps_info->generate_spec_deps);
1307 DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1308 estimate_dep_weak (mem1, mem2));
1311 sd_add_dep (new_dep, resolved_p);
1313 return DEP_CREATED;
1316 /* Initialize BACK_LIST_PTR with consumer's backward list and
1317 FORW_LIST_PTR with producer's forward list. If RESOLVED_P is true
1318 initialize with lists that hold resolved deps. */
1319 static void
1320 get_back_and_forw_lists (dep_t dep, bool resolved_p,
1321 deps_list_t *back_list_ptr,
1322 deps_list_t *forw_list_ptr)
1324 rtx_insn *con = DEP_CON (dep);
1326 if (!resolved_p)
1328 if (dep_spec_p (dep))
1329 *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1330 else
1331 *back_list_ptr = INSN_HARD_BACK_DEPS (con);
1333 *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1335 else
1337 *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1338 *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1342 /* Add dependence described by DEP.
1343 If RESOLVED_P is true treat the dependence as a resolved one. */
1344 void
1345 sd_add_dep (dep_t dep, bool resolved_p)
1347 dep_node_t n = create_dep_node ();
1348 deps_list_t con_back_deps;
1349 deps_list_t pro_forw_deps;
1350 rtx_insn *elem = DEP_PRO (dep);
1351 rtx_insn *insn = DEP_CON (dep);
1353 gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1355 if ((current_sched_info->flags & DO_SPECULATION) == 0
1356 || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1357 DEP_STATUS (dep) &= ~SPECULATIVE;
1359 copy_dep (DEP_NODE_DEP (n), dep);
1361 get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1363 add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1365 #ifdef ENABLE_CHECKING
1366 check_dep (dep, false);
1367 #endif
1369 add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1371 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1372 in the bitmap caches of dependency information. */
1373 if (true_dependency_cache != NULL)
1374 set_dependency_caches (dep);
1377 /* Add or update the backward dependence described by DEP, operating on
1378 the resolved dependency lists if RESOLVED_P is true.
1379 This function is a convenience wrapper around add_or_update_dep_1. */
1380 enum DEPS_ADJUST_RESULT
1381 sd_add_or_update_dep (dep_t dep, bool resolved_p)
1383 return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1386 /* Resolve the dependence pointed to by SD_IT.
1387 SD_IT will advance to the next element. */
1388 void
1389 sd_resolve_dep (sd_iterator_def sd_it)
1391 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1392 dep_t dep = DEP_NODE_DEP (node);
1393 rtx_insn *pro = DEP_PRO (dep);
1394 rtx_insn *con = DEP_CON (dep);
1396 if (dep_spec_p (dep))
1397 move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1398 INSN_RESOLVED_BACK_DEPS (con));
1399 else
1400 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1401 INSN_RESOLVED_BACK_DEPS (con));
1403 move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1404 INSN_RESOLVED_FORW_DEPS (pro));
1407 /* Perform the inverse operation of sd_resolve_dep. Restore the dependence
1408 pointed to by SD_IT to unresolved state. */
1409 void
1410 sd_unresolve_dep (sd_iterator_def sd_it)
1412 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1413 dep_t dep = DEP_NODE_DEP (node);
1414 rtx_insn *pro = DEP_PRO (dep);
1415 rtx_insn *con = DEP_CON (dep);
1417 if (dep_spec_p (dep))
1418 move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1419 INSN_SPEC_BACK_DEPS (con));
1420 else
1421 move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1422 INSN_HARD_BACK_DEPS (con));
1424 move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
1425 INSN_FORW_DEPS (pro));
1428 /* Make TO depend on all the FROM's producers.
1429 If RESOLVED_P is true add dependencies to the resolved lists. */
1430 void
1431 sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
1433 sd_list_types_def list_type;
1434 sd_iterator_def sd_it;
1435 dep_t dep;
1437 list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1439 FOR_EACH_DEP (from, list_type, sd_it, dep)
1441 dep_def _new_dep, *new_dep = &_new_dep;
1443 copy_dep (new_dep, dep);
1444 DEP_CON (new_dep) = to;
1445 sd_add_dep (new_dep, resolved_p);
1449 /* Remove a dependency referred to by SD_IT.
1450 SD_IT will point to the next dependence after removal. */
1451 void
1452 sd_delete_dep (sd_iterator_def sd_it)
1454 dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1455 dep_t dep = DEP_NODE_DEP (n);
1456 rtx_insn *pro = DEP_PRO (dep);
1457 rtx_insn *con = DEP_CON (dep);
1458 deps_list_t con_back_deps;
1459 deps_list_t pro_forw_deps;
1461 if (true_dependency_cache != NULL)
1463 int elem_luid = INSN_LUID (pro);
1464 int insn_luid = INSN_LUID (con);
1466 bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1467 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1468 bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1469 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1471 if (current_sched_info->flags & DO_SPECULATION)
1472 bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1475 get_back_and_forw_lists (dep, sd_it.resolved_p,
1476 &con_back_deps, &pro_forw_deps);
1478 remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1479 remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1481 delete_dep_node (n);
1484 /* Dump size of the lists. */
1485 #define DUMP_LISTS_SIZE (2)
1487 /* Dump dependencies of the lists. */
1488 #define DUMP_LISTS_DEPS (4)
1490 /* Dump all information about the lists. */
1491 #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1493 /* Dump deps_lists of INSN specified by TYPES to DUMP.
1494 FLAGS is a bit mask specifying what information about the lists needs
1495 to be printed.
1496 If FLAGS has the very first bit set, then dump all information about
1497 the lists and propagate this bit into the callee dump functions. */
1498 static void
1499 dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1501 sd_iterator_def sd_it;
1502 dep_t dep;
1503 int all;
1505 all = (flags & 1);
1507 if (all)
1508 flags |= DUMP_LISTS_ALL;
1510 fprintf (dump, "[");
1512 if (flags & DUMP_LISTS_SIZE)
1513 fprintf (dump, "%d; ", sd_lists_size (insn, types));
1515 if (flags & DUMP_LISTS_DEPS)
1517 FOR_EACH_DEP (insn, types, sd_it, dep)
1519 dump_dep (dump, dep, dump_dep_flags | all);
1520 fprintf (dump, " ");
1525 /* Dump all information about deps_lists of INSN specified by TYPES
1526 to STDERR. */
1527 void
1528 sd_debug_lists (rtx insn, sd_list_types_def types)
1530 dump_lists (stderr, insn, types, 1);
1531 fprintf (stderr, "\n");
1534 /* A wrapper around add_dependence_1, to add a dependence of CON on
1535 PRO, with type DEP_TYPE. This function implements special handling
1536 for REG_DEP_CONTROL dependencies. For these, we optionally promote
1537 the type to REG_DEP_ANTI if we can determine that predication is
1538 impossible; otherwise we add additional true dependencies on the
1539 INSN_COND_DEPS list of the jump (which PRO must be). */
1540 void
1541 add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
1543 if (dep_type == REG_DEP_CONTROL
1544 && !(current_sched_info->flags & DO_PREDICATION))
1545 dep_type = REG_DEP_ANTI;
1547 /* A REG_DEP_CONTROL dependence may be eliminated through predication,
1548 so we must also make the insn dependent on the setter of the
1549 condition. */
1550 if (dep_type == REG_DEP_CONTROL)
1552 rtx_insn *real_pro = pro;
1553 rtx_insn *other = real_insn_for_shadow (real_pro);
1554 rtx cond;
1556 if (other != NULL_RTX)
1557 real_pro = other;
1558 cond = sched_get_reverse_condition_uncached (real_pro);
1559 /* Verify that the insn does not use a different value in
1560 the condition register than the one that was present at
1561 the jump. */
1562 if (cond == NULL_RTX)
1563 dep_type = REG_DEP_ANTI;
1564 else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
1566 HARD_REG_SET uses;
1567 CLEAR_HARD_REG_SET (uses);
1568 note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
1569 if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
1570 dep_type = REG_DEP_ANTI;
1572 if (dep_type == REG_DEP_CONTROL)
1574 if (sched_verbose >= 5)
1575 fprintf (sched_dump, "making DEP_CONTROL for %d\n",
1576 INSN_UID (real_pro));
1577 add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
1578 REG_DEP_TRUE, false);
1582 add_dependence_1 (con, pro, dep_type);
1585 /* A convenience wrapper to operate on an entire list. HARD should be
1586 true if DEP_NONREG should be set on newly created dependencies. */
1588 static void
1589 add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
1590 enum reg_note dep_type, bool hard)
1592 mark_as_hard = hard;
1593 for (; list; list = list->next ())
1595 if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
1596 add_dependence (insn, list->insn (), dep_type);
1598 mark_as_hard = false;
1601 /* Similar, but free *LISTP at the same time, when the context
1602 is not readonly. HARD should be true if DEP_NONREG should be set on
1603 newly created dependencies. */
1605 static void
1606 add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
1607 rtx_insn_list **listp,
1608 int uncond, enum reg_note dep_type, bool hard)
1610 add_dependence_list (insn, *listp, uncond, dep_type, hard);
1612 /* We don't want to short-circuit dependencies involving debug
1613 insns, because they may cause actual dependencies to be
1614 disregarded. */
1615 if (deps->readonly || DEBUG_INSN_P (insn))
1616 return;
1618 free_INSN_LIST_list (listp);
1621 /* Remove all occurrences of INSN from LIST. Return the number of
1622 occurrences removed. */
1624 static int
1625 remove_from_dependence_list (rtx insn, rtx_insn_list **listp)
1627 int removed = 0;
1629 while (*listp)
1631 if ((*listp)->insn () == insn)
1633 remove_free_INSN_LIST_node (listp);
1634 removed++;
1635 continue;
1638 listp = (rtx_insn_list **)&XEXP (*listp, 1);
1641 return removed;
1644 /* Same as above, but process two lists at once. */
1645 static int
1646 remove_from_both_dependence_lists (rtx insn,
1647 rtx_insn_list **listp,
1648 rtx_expr_list **exprp)
1650 int removed = 0;
1652 while (*listp)
1654 if (XEXP (*listp, 0) == insn)
1656 remove_free_INSN_LIST_node (listp);
1657 remove_free_EXPR_LIST_node (exprp);
1658 removed++;
1659 continue;
1662 listp = (rtx_insn_list **)&XEXP (*listp, 1);
1663 exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
1666 return removed;
1669 /* Clear all dependencies for an insn. */
1670 static void
1671 delete_all_dependences (rtx insn)
1673 sd_iterator_def sd_it;
1674 dep_t dep;
1676 /* The loop below could be optimized to clear the caches and back_deps
1677 in one call, but that would duplicate code from
1678 delete_dep (). */
1680 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1681 sd_iterator_cond (&sd_it, &dep);)
1682 sd_delete_dep (sd_it);
1685 /* All insns in a scheduling group except the first should only have
1686 dependencies on the previous insn in the group. So we find the
1687 first instruction in the scheduling group by walking the dependence
1688 chains backwards. Then we add the dependencies for the group to
1689 the previous nonnote insn. */
1691 static void
1692 chain_to_prev_insn (rtx_insn *insn)
1694 sd_iterator_def sd_it;
1695 dep_t dep;
1696 rtx_insn *prev_nonnote;
1698 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1700 rtx_insn *i = insn;
1701 rtx_insn *pro = DEP_PRO (dep);
1705 i = prev_nonnote_insn (i);
1707 if (pro == i)
1708 goto next_link;
1709 } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1711 if (! sched_insns_conditions_mutex_p (i, pro))
1712 add_dependence (i, pro, DEP_TYPE (dep));
1713 next_link:;
1716 delete_all_dependences (insn);
1718 prev_nonnote = prev_nonnote_nondebug_insn (insn);
1719 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1720 && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1721 add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1724 /* Process an insn's memory dependencies. There are four kinds of
1725 dependencies:
1727 (0) read dependence: read follows read
1728 (1) true dependence: read follows write
1729 (2) output dependence: write follows write
1730 (3) anti dependence: write follows read
1732 We are careful to build only dependencies which actually exist, and
1733 use transitivity to avoid building too many links. */
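/* For example, in the sequence
     (1)  *p = a
     (2)  b = *p
     (3)  *p = c
   insn (2) has a true dependence on (1), while insn (3) has an output
   dependence on (1) and an anti dependence on (2).  */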
1735 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1736 The MEM is a memory reference contained within INSN, which we are saving
1737 so that we can do memory aliasing on it. */
1739 static void
1740 add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1741 rtx_insn *insn, rtx mem)
1743 rtx_insn_list **insn_list;
1744 rtx_insn_list *insn_node;
1745 rtx_expr_list **mem_list;
1746 rtx_expr_list *mem_node;
1748 gcc_assert (!deps->readonly);
1749 if (read_p)
1751 insn_list = &deps->pending_read_insns;
1752 mem_list = &deps->pending_read_mems;
1753 if (!DEBUG_INSN_P (insn))
1754 deps->pending_read_list_length++;
1756 else
1758 insn_list = &deps->pending_write_insns;
1759 mem_list = &deps->pending_write_mems;
1760 deps->pending_write_list_length++;
1763 insn_node = alloc_INSN_LIST (insn, *insn_list);
1764 *insn_list = insn_node;
1766 if (sched_deps_info->use_cselib)
1768 mem = shallow_copy_rtx (mem);
1769 XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
1770 GET_MODE (mem), insn);
1772 mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1773 *mem_list = mem_node;
1776 /* Make a dependency between every memory reference on the pending lists
1777 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1778 dependencies for a read operation, similarly with FOR_WRITE. */
1780 static void
1781 flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
1782 int for_write)
1784 if (for_write)
1786 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1787 1, REG_DEP_ANTI, true);
1788 if (!deps->readonly)
1790 free_EXPR_LIST_list (&deps->pending_read_mems);
1791 deps->pending_read_list_length = 0;
1795 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1796 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1797 true);
1799 add_dependence_list_and_free (deps, insn,
1800 &deps->last_pending_memory_flush, 1,
1801 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1802 true);
1804 add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
1805 REG_DEP_ANTI, true);
1807 if (DEBUG_INSN_P (insn))
1809 if (for_write)
1810 free_INSN_LIST_list (&deps->pending_read_insns);
1811 free_INSN_LIST_list (&deps->pending_write_insns);
1812 free_INSN_LIST_list (&deps->last_pending_memory_flush);
1813 free_INSN_LIST_list (&deps->pending_jump_insns);
1816 if (!deps->readonly)
1818 free_EXPR_LIST_list (&deps->pending_write_mems);
1819 deps->pending_write_list_length = 0;
1821 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1822 deps->pending_flush_length = 1;
1824 mark_as_hard = false;
1827 /* Instruction whose dependencies we are analyzing. */
1828 static rtx_insn *cur_insn = NULL;
1830 /* Implement hooks for haifa scheduler. */
1832 static void
1833 haifa_start_insn (rtx_insn *insn)
1835 gcc_assert (insn && !cur_insn);
1837 cur_insn = insn;
1840 static void
1841 haifa_finish_insn (void)
1843 cur_insn = NULL;
1846 void
1847 haifa_note_reg_set (int regno)
1849 SET_REGNO_REG_SET (reg_pending_sets, regno);
1852 void
1853 haifa_note_reg_clobber (int regno)
1855 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1858 void
1859 haifa_note_reg_use (int regno)
1861 SET_REGNO_REG_SET (reg_pending_uses, regno);
1864 static void
1865 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
1867 if (!(ds & SPECULATIVE))
1869 mem = NULL_RTX;
1870 pending_mem = NULL_RTX;
1872 else
1873 gcc_assert (ds & BEGIN_DATA);
1876 dep_def _dep, *dep = &_dep;
1878 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1879 current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1880 DEP_NONREG (dep) = 1;
1881 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1886 static void
1887 haifa_note_dep (rtx_insn *elem, ds_t ds)
1889 dep_def _dep;
1890 dep_t dep = &_dep;
1892 init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1893 if (mark_as_hard)
1894 DEP_NONREG (dep) = 1;
1895 maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1898 static void
1899 note_reg_use (int r)
1901 if (sched_deps_info->note_reg_use)
1902 sched_deps_info->note_reg_use (r);
1905 static void
1906 note_reg_set (int r)
1908 if (sched_deps_info->note_reg_set)
1909 sched_deps_info->note_reg_set (r);
1912 static void
1913 note_reg_clobber (int r)
1915 if (sched_deps_info->note_reg_clobber)
1916 sched_deps_info->note_reg_clobber (r);
1919 static void
1920 note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
1922 if (sched_deps_info->note_mem_dep)
1923 sched_deps_info->note_mem_dep (m1, m2, e, ds);
1926 static void
1927 note_dep (rtx_insn *e, ds_t ds)
1929 if (sched_deps_info->note_dep)
1930 sched_deps_info->note_dep (e, ds);
1933 /* Return the reg_note corresponding to DS. */
1934 enum reg_note
1935 ds_to_dt (ds_t ds)
1937 if (ds & DEP_TRUE)
1938 return REG_DEP_TRUE;
1939 else if (ds & DEP_OUTPUT)
1940 return REG_DEP_OUTPUT;
1941 else if (ds & DEP_ANTI)
1942 return REG_DEP_ANTI;
1943 else
1945 gcc_assert (ds & DEP_CONTROL);
1946 return REG_DEP_CONTROL;
1952 /* Functions for computation of info needed for register pressure
1953 sensitive insn scheduling. */
1956 /* Allocate and return reg_use_data structure for REGNO and INSN. */
1957 static struct reg_use_data *
1958 create_insn_reg_use (int regno, rtx_insn *insn)
1960 struct reg_use_data *use;
1962 use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1963 use->regno = regno;
1964 use->insn = insn;
1965 use->next_insn_use = INSN_REG_USE_LIST (insn);
1966 INSN_REG_USE_LIST (insn) = use;
1967 return use;
1970 /* Allocate reg_set_data structure for REGNO and INSN. */
1971 static void
1972 create_insn_reg_set (int regno, rtx insn)
1974 struct reg_set_data *set;
1976 set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1977 set->regno = regno;
1978 set->insn = insn;
1979 set->next_insn_set = INSN_REG_SET_LIST (insn);
1980 INSN_REG_SET_LIST (insn) = set;
1983 /* Set up insn register uses for INSN and dependency context DEPS. */
1984 static void
1985 setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
1987 unsigned i;
1988 reg_set_iterator rsi;
1989 struct reg_use_data *use, *use2, *next;
1990 struct deps_reg *reg_last;
1992 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1994 if (i < FIRST_PSEUDO_REGISTER
1995 && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1996 continue;
1998 if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1999 && ! REGNO_REG_SET_P (reg_pending_sets, i)
2000 && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
2001 /* Ignore a use that is not dying. */
2002 continue;
2004 use = create_insn_reg_use (i, insn);
2005 use->next_regno_use = use;
2006 reg_last = &deps->reg_last[i];
2008 /* Create the cyclic list of uses. */
2009 for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
2011 use2 = create_insn_reg_use (i, list->insn ());
2012 next = use->next_regno_use;
2013 use->next_regno_use = use2;
2014 use2->next_regno_use = next;
2019 /* Register pressure info for the currently processed insn. */
2020 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
2022 /* Return TRUE if INSN has the use structure for REGNO. */
2023 static bool
2024 insn_use_p (rtx insn, int regno)
2026 struct reg_use_data *use;
2028 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2029 if (use->regno == regno)
2030 return true;
2031 return false;
2034 /* Update the register pressure info after birth of pseudo register REGNO
2035 in INSN. Arguments CLOBBER_P and UNUSED_P indicate, respectively, that
2036 the register is clobbered or unused after the insn. */
2037 static void
2038 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2040 int incr, new_incr;
2041 enum reg_class cl;
2043 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2044 cl = sched_regno_pressure_class[regno];
2045 if (cl != NO_REGS)
2047 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2048 if (clobber_p)
2050 new_incr = reg_pressure_info[cl].clobber_increase + incr;
2051 reg_pressure_info[cl].clobber_increase = new_incr;
2053 else if (unused_p)
2055 new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2056 reg_pressure_info[cl].unused_set_increase = new_incr;
2058 else
2060 new_incr = reg_pressure_info[cl].set_increase + incr;
2061 reg_pressure_info[cl].set_increase = new_incr;
2062 if (! insn_use_p (insn, regno))
2063 reg_pressure_info[cl].change += incr;
2064 create_insn_reg_set (regno, insn);
2066 gcc_assert (new_incr < (1 << INCREASE_BITS));
2070 /* Like mark_insn_pseudo_birth except that NREGS says how many
2071 hard registers are involved in the birth. */
2072 static void
2073 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2074 bool clobber_p, bool unused_p)
2076 enum reg_class cl;
2077 int new_incr, last = regno + nregs;
2079 while (regno < last)
2081 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2082 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2084 cl = sched_regno_pressure_class[regno];
2085 if (cl != NO_REGS)
2087 if (clobber_p)
2089 new_incr = reg_pressure_info[cl].clobber_increase + 1;
2090 reg_pressure_info[cl].clobber_increase = new_incr;
2092 else if (unused_p)
2094 new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2095 reg_pressure_info[cl].unused_set_increase = new_incr;
2097 else
2099 new_incr = reg_pressure_info[cl].set_increase + 1;
2100 reg_pressure_info[cl].set_increase = new_incr;
2101 if (! insn_use_p (insn, regno))
2102 reg_pressure_info[cl].change += 1;
2103 create_insn_reg_set (regno, insn);
2105 gcc_assert (new_incr < (1 << INCREASE_BITS));
2108 regno++;
2112 /* Update the register pressure info after birth of pseudo or hard
2113    register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say,
2114    respectively, that the register is clobbered, or is unused after the
2115    insn.  */
2116 static void
2117 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2119 int regno;
2121 if (GET_CODE (reg) == SUBREG)
2122 reg = SUBREG_REG (reg);
2124 if (! REG_P (reg))
2125 return;
2127 regno = REGNO (reg);
2128 if (regno < FIRST_PSEUDO_REGISTER)
2129 mark_insn_hard_regno_birth (insn, regno,
2130 hard_regno_nregs[regno][GET_MODE (reg)],
2131 clobber_p, unused_p);
2132 else
2133 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2136 /* Update the register pressure info after death of pseudo register
2137 REGNO. */
2138 static void
2139 mark_pseudo_death (int regno)
2141 int incr;
2142 enum reg_class cl;
2144 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2145 cl = sched_regno_pressure_class[regno];
2146 if (cl != NO_REGS)
2148 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2149 reg_pressure_info[cl].change -= incr;
2153 /* Like mark_pseudo_death except that NREGS says how many hard
2154    registers are involved in the death.  */
2155 static void
2156 mark_hard_regno_death (int regno, int nregs)
2158 enum reg_class cl;
2159 int last = regno + nregs;
2161 while (regno < last)
2163 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2164 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2166 cl = sched_regno_pressure_class[regno];
2167 if (cl != NO_REGS)
2168 reg_pressure_info[cl].change -= 1;
2170 regno++;
2174 /* Update the register pressure info after death of pseudo or hard
2175 register REG. */
2176 static void
2177 mark_reg_death (rtx reg)
2179 int regno;
2181 if (GET_CODE (reg) == SUBREG)
2182 reg = SUBREG_REG (reg);
2184 if (! REG_P (reg))
2185 return;
2187 regno = REGNO (reg);
2188 if (regno < FIRST_PSEUDO_REGISTER)
2189 mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
2190 else
2191 mark_pseudo_death (regno);
2194 /* Process SETTER of REG. DATA is an insn containing the setter. */
2195 static void
2196 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2198 if (setter != NULL_RTX && GET_CODE (setter) != SET)
2199 return;
2200 mark_insn_reg_birth
2201 ((rtx) data, reg, false,
2202 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2205 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
2206 static void
2207 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2209 if (GET_CODE (setter) == CLOBBER)
2210 mark_insn_reg_birth ((rtx) data, reg, true, false);
2213 /* Set up reg pressure info related to INSN. */
2214 void
2215 init_insn_reg_pressure_info (rtx insn)
2217 int i, len;
2218 enum reg_class cl;
2219 static struct reg_pressure_data *pressure_info;
2220 rtx link;
2222 gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2224 if (! INSN_P (insn))
2225 return;
2227 for (i = 0; i < ira_pressure_classes_num; i++)
2229 cl = ira_pressure_classes[i];
2230 reg_pressure_info[cl].clobber_increase = 0;
2231 reg_pressure_info[cl].set_increase = 0;
2232 reg_pressure_info[cl].unused_set_increase = 0;
2233 reg_pressure_info[cl].change = 0;
2236 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2238 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2240 #ifdef AUTO_INC_DEC
2241 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2242 if (REG_NOTE_KIND (link) == REG_INC)
2243 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2244 #endif
2246 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2247 if (REG_NOTE_KIND (link) == REG_DEAD)
2248 mark_reg_death (XEXP (link, 0));
2250 len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2251 pressure_info
2252 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2253 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2254 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2255 * sizeof (int), 1);
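     /* Copy the per-class counters gathered above into the insn's own
        reg_pressure_data array, which is indexed by the position of the
        class within ira_pressure_classes rather than by the class number
        itself.  */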
2256 for (i = 0; i < ira_pressure_classes_num; i++)
2258 cl = ira_pressure_classes[i];
2259 pressure_info[i].clobber_increase
2260 = reg_pressure_info[cl].clobber_increase;
2261 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2262 pressure_info[i].unused_set_increase
2263 = reg_pressure_info[cl].unused_set_increase;
2264 pressure_info[i].change = reg_pressure_info[cl].change;
2271 /* Internal variable for the sched_analyze_[12] () functions.
2272    If it is nonzero, it means that sched_analyze_[12] is looking
2273    at the outermost (toplevel) SET.  */
2274 static bool can_start_lhs_rhs_p;
2276 /* Extend reg info for the deps context DEPS given that
2277 we have just generated a register numbered REGNO. */
2278 static void
2279 extend_deps_reg_info (struct deps_desc *deps, int regno)
2281 int max_regno = regno + 1;
2283 gcc_assert (!reload_completed);
2285 /* In a readonly context, it would not hurt to extend info,
2286 but it should not be needed. */
2287 if (reload_completed && deps->readonly)
2289 deps->max_reg = max_regno;
2290 return;
2293 if (max_regno > deps->max_reg)
2295 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2296 max_regno);
2297 memset (&deps->reg_last[deps->max_reg],
2298 0, (max_regno - deps->max_reg)
2299 * sizeof (struct deps_reg));
2300 deps->max_reg = max_regno;
2304 /* Extends REG_INFO_P if needed. */
2305 void
2306 maybe_extend_reg_info_p (void)
2308 /* Extend REG_INFO_P, if needed. */
2309 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2311 size_t new_reg_info_p_size = max_regno + 128;
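       /* The extra 128 slots give some headroom, so the pseudos created
          one at a time by the selective scheduler's renaming do not force
          a reallocation on every new register.  */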
2313 gcc_assert (!reload_completed && sel_sched_p ());
2315 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2316 new_reg_info_p_size,
2317 reg_info_p_size,
2318 sizeof (*reg_info_p));
2319 reg_info_p_size = new_reg_info_p_size;
2323 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2324 The type of the reference is specified by REF and can be SET,
2325 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2327 static void
2328 sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
2329 enum rtx_code ref, rtx_insn *insn)
2331 /* We could emit new pseudos in renaming. Extend the reg structures. */
2332 if (!reload_completed && sel_sched_p ()
2333 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2334 extend_deps_reg_info (deps, regno);
2336 maybe_extend_reg_info_p ();
2338 /* A hard reg in a wide mode may really be multiple registers.
2339 If so, mark all of them just like the first. */
2340 if (regno < FIRST_PSEUDO_REGISTER)
2342 int i = hard_regno_nregs[regno][mode];
2343 if (ref == SET)
2345 while (--i >= 0)
2346 note_reg_set (regno + i);
2348 else if (ref == USE)
2350 while (--i >= 0)
2351 note_reg_use (regno + i);
2353 else
2355 while (--i >= 0)
2356 note_reg_clobber (regno + i);
2360 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2361 it does not reload. Ignore these as they have served their
2362 purpose already. */
2363 else if (regno >= deps->max_reg)
2365 enum rtx_code code = GET_CODE (PATTERN (insn));
2366 gcc_assert (code == USE || code == CLOBBER);
2369 else
2371 if (ref == SET)
2372 note_reg_set (regno);
2373 else if (ref == USE)
2374 note_reg_use (regno);
2375 else
2376 note_reg_clobber (regno);
2378 /* Pseudos that are REG_EQUIV to something may be replaced
2379 by that during reloading. We need only add dependencies for
2380 the address in the REG_EQUIV note. */
2381 if (!reload_completed && get_reg_known_equiv_p (regno))
2383 rtx t = get_reg_known_value (regno);
2384 if (MEM_P (t))
2385 sched_analyze_2 (deps, XEXP (t, 0), insn);
2388 /* Don't let it cross a call after scheduling if it doesn't
2389 already cross one. */
2390 if (REG_N_CALLS_CROSSED (regno) == 0)
2392 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2393 deps->sched_before_next_call
2394 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2395 else
2396 add_dependence_list (insn, deps->last_function_call, 1,
2397 REG_DEP_ANTI, false);
2402 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2403 rtx, X, creating all dependencies generated by the write to the
2404 destination of X, and reads of everything mentioned. */
2406 static void
2407 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2409 rtx dest = XEXP (x, 0);
2410 enum rtx_code code = GET_CODE (x);
2411 bool cslr_p = can_start_lhs_rhs_p;
2413 can_start_lhs_rhs_p = false;
2415 gcc_assert (dest);
2416 if (dest == 0)
2417 return;
2419 if (cslr_p && sched_deps_info->start_lhs)
2420 sched_deps_info->start_lhs (dest);
2422 if (GET_CODE (dest) == PARALLEL)
2424 int i;
2426 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2427 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2428 sched_analyze_1 (deps,
2429 gen_rtx_CLOBBER (VOIDmode,
2430 XEXP (XVECEXP (dest, 0, i), 0)),
2431 insn);
2433 if (cslr_p && sched_deps_info->finish_lhs)
2434 sched_deps_info->finish_lhs ();
2436 if (code == SET)
2438 can_start_lhs_rhs_p = cslr_p;
2440 sched_analyze_2 (deps, SET_SRC (x), insn);
2442 can_start_lhs_rhs_p = false;
2445 return;
2448 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2449 || GET_CODE (dest) == ZERO_EXTRACT)
2451 if (GET_CODE (dest) == STRICT_LOW_PART
2452 || GET_CODE (dest) == ZERO_EXTRACT
2453 || df_read_modify_subreg_p (dest))
2455 /* These both read and modify the result. We must handle
2456 them as writes to get proper dependencies for following
2457 instructions. We must handle them as reads to get proper
2458 dependencies from this to previous instructions.
2459 Thus we need to call sched_analyze_2. */
2461 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2463 if (GET_CODE (dest) == ZERO_EXTRACT)
2465 /* The second and third arguments are values read by this insn. */
2466 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2467 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2469 dest = XEXP (dest, 0);
2472 if (REG_P (dest))
2474 int regno = REGNO (dest);
2475 machine_mode mode = GET_MODE (dest);
2477 sched_analyze_reg (deps, regno, mode, code, insn);
2479 #ifdef STACK_REGS
2480 /* Treat all writes to a stack register as modifying the TOS. */
2481 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2483 /* Avoid analyzing the same register twice. */
2484 if (regno != FIRST_STACK_REG)
2485 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2487 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2488 FIRST_STACK_REG);
2490 #endif
2492 else if (MEM_P (dest))
2494 /* Writing memory. */
2495 rtx t = dest;
2497 if (sched_deps_info->use_cselib)
2499 machine_mode address_mode = get_address_mode (dest);
2501 t = shallow_copy_rtx (dest);
2502 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2503 GET_MODE (t), insn);
2504 XEXP (t, 0)
2505 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2506 insn);
2508 t = canon_rtx (t);
2510 /* Pending lists can't get larger with a readonly context. */
2511 if (!deps->readonly
2512 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2513 >= MAX_PENDING_LIST_LENGTH))
2515 /* Flush all pending reads and writes to prevent the pending lists
2516 from getting any larger. Insn scheduling runs too slowly when
2517 these lists get long. When compiling GCC with itself,
2518 this flush occurs 8 times for sparc, and 10 times for m88k using
2519 the default value of 32. */
2520 flush_pending_lists (deps, insn, false, true);
2522 else
2524 rtx_insn_list *pending;
2525 rtx_expr_list *pending_mem;
2527 pending = deps->pending_read_insns;
2528 pending_mem = deps->pending_read_mems;
2529 while (pending)
2531 if (anti_dependence (pending_mem->element (), t)
2532 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2533 note_mem_dep (t, pending_mem->element (), pending->insn (),
2534 DEP_ANTI);
2536 pending = pending->next ();
2537 pending_mem = pending_mem->next ();
2540 pending = deps->pending_write_insns;
2541 pending_mem = deps->pending_write_mems;
2542 while (pending)
2544 if (output_dependence (pending_mem->element (), t)
2545 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2546 note_mem_dep (t, pending_mem->element (),
2547 pending->insn (),
2548 DEP_OUTPUT);
2550 pending = pending->next ();
2551 pending_mem = pending_mem-> next ();
2554 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2555 REG_DEP_ANTI, true);
2556 add_dependence_list (insn, deps->pending_jump_insns, 1,
2557 REG_DEP_CONTROL, true);
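          /* The control dependence on pending jumps keeps this store from
             being hoisted above a conditional jump that may guard it; a
             speculated store could clobber memory on a path where it was
             never meant to execute.  */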
2559 if (!deps->readonly)
2560 add_insn_mem_dependence (deps, false, insn, dest);
2562 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2565 if (cslr_p && sched_deps_info->finish_lhs)
2566 sched_deps_info->finish_lhs ();
2568 /* Analyze reads. */
2569 if (GET_CODE (x) == SET)
2571 can_start_lhs_rhs_p = cslr_p;
2573 sched_analyze_2 (deps, SET_SRC (x), insn);
2575 can_start_lhs_rhs_p = false;
2579 /* Analyze the uses of memory and registers in rtx X in INSN. */
2580 static void
2581 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2583 int i;
2584 int j;
2585 enum rtx_code code;
2586 const char *fmt;
2587 bool cslr_p = can_start_lhs_rhs_p;
2589 can_start_lhs_rhs_p = false;
2591 gcc_assert (x);
2592 if (x == 0)
2593 return;
2595 if (cslr_p && sched_deps_info->start_rhs)
2596 sched_deps_info->start_rhs (x);
2598 code = GET_CODE (x);
2600 switch (code)
2602 CASE_CONST_ANY:
2603 case SYMBOL_REF:
2604 case CONST:
2605 case LABEL_REF:
2606 /* Ignore constants. */
2607 if (cslr_p && sched_deps_info->finish_rhs)
2608 sched_deps_info->finish_rhs ();
2610 return;
2612 #ifdef HAVE_cc0
2613 case CC0:
2614 /* User of CC0 depends on immediately preceding insn. */
2615 SCHED_GROUP_P (insn) = 1;
2616 /* Don't move CC0 setter to another block (it can set up the
2617 same flag for previous CC0 users which is safe). */
2618 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2620 if (cslr_p && sched_deps_info->finish_rhs)
2621 sched_deps_info->finish_rhs ();
2623 return;
2624 #endif
2626 case REG:
2628 int regno = REGNO (x);
2629 machine_mode mode = GET_MODE (x);
2631 sched_analyze_reg (deps, regno, mode, USE, insn);
2633 #ifdef STACK_REGS
2634 /* Treat all reads of a stack register as modifying the TOS. */
2635 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2637 /* Avoid analyzing the same register twice. */
2638 if (regno != FIRST_STACK_REG)
2639 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2640 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2642 #endif
2644 if (cslr_p && sched_deps_info->finish_rhs)
2645 sched_deps_info->finish_rhs ();
2647 return;
2650 case MEM:
2652 /* Reading memory. */
2653 rtx u;
2654 rtx_insn_list *pending;
2655 rtx_expr_list *pending_mem;
2656 rtx t = x;
2658 if (sched_deps_info->use_cselib)
2660 machine_mode address_mode = get_address_mode (t);
2662 t = shallow_copy_rtx (t);
2663 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2664 GET_MODE (t), insn);
2665 XEXP (t, 0)
2666 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2667 insn);
2670 if (!DEBUG_INSN_P (insn))
2672 t = canon_rtx (t);
2673 pending = deps->pending_read_insns;
2674 pending_mem = deps->pending_read_mems;
2675 while (pending)
2677 if (read_dependence (pending_mem->element (), t)
2678 && ! sched_insns_conditions_mutex_p (insn,
2679 pending->insn ()))
2680 note_mem_dep (t, pending_mem->element (),
2681 pending->insn (),
2682 DEP_ANTI);
2684 pending = pending->next ();
2685 pending_mem = pending_mem->next ();
2688 pending = deps->pending_write_insns;
2689 pending_mem = deps->pending_write_mems;
2690 while (pending)
2692 if (true_dependence (pending_mem->element (), VOIDmode, t)
2693 && ! sched_insns_conditions_mutex_p (insn,
2694 pending->insn ()))
2695 note_mem_dep (t, pending_mem->element (),
2696 pending->insn (),
2697 sched_deps_info->generate_spec_deps
2698 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2700 pending = pending->next ();
2701 pending_mem = pending_mem->next ();
2704 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2705 add_dependence (insn, as_a <rtx_insn *> (XEXP (u, 0)),
2706 REG_DEP_ANTI);
2708 for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
2709 if (deps_may_trap_p (x))
2711 if ((sched_deps_info->generate_spec_deps)
2712 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2714 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2715 MAX_DEP_WEAK);
2717 note_dep (as_a <rtx_insn *> (XEXP (u, 0)), ds);
2719 else
2720 add_dependence (insn, as_a <rtx_insn *> (XEXP (u, 0)),
2721 REG_DEP_CONTROL);
2725 /* Always add these dependencies to pending_reads, since
2726 this insn may be followed by a write. */
2727 if (!deps->readonly)
2729 if ((deps->pending_read_list_length
2730 + deps->pending_write_list_length)
2731 >= MAX_PENDING_LIST_LENGTH
2732 && !DEBUG_INSN_P (insn))
2733 flush_pending_lists (deps, insn, true, true);
2734 add_insn_mem_dependence (deps, true, insn, x);
2737 sched_analyze_2 (deps, XEXP (x, 0), insn);
2739 if (cslr_p && sched_deps_info->finish_rhs)
2740 sched_deps_info->finish_rhs ();
2742 return;
2745 /* Force pending stores to memory in case a trap handler needs them.
2746 Also force pending loads from memory; loads and stores can segfault
2747 and the signal handler won't be triggered if the trap insn was moved
2748 above load or store insn. */
2749 case TRAP_IF:
2750 flush_pending_lists (deps, insn, true, true);
2751 break;
2753 case PREFETCH:
2754 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2755 reg_pending_barrier = TRUE_BARRIER;
2756      /* A prefetch insn contains addresses only.  So if the prefetch
2757         address has no registers, there will be no dependencies on
2758         the prefetch insn.  This is wrong from the code correctness
2759         point of view, as such a prefetch can be moved below
2760         a jump insn which usually generates a MOVE_BARRIER preventing
2761         insns containing registers or memories from moving through
2762         the barrier.  It is also wrong from the generated code
2763         performance point of view, as a prefetch without dependencies
2764         will tend to be issued later instead of earlier.  It is hard
2765         to generate accurate dependencies for prefetch insns, as a
2766         prefetch has only the start address, but it is better to have
2767         something than nothing.  */
2768 if (!deps->readonly)
2770 rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2771 if (sched_deps_info->use_cselib)
2772 cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2773 add_insn_mem_dependence (deps, true, insn, x);
2775 break;
2777 case UNSPEC_VOLATILE:
2778 flush_pending_lists (deps, insn, true, true);
2779 /* FALLTHRU */
2781 case ASM_OPERANDS:
2782 case ASM_INPUT:
2784 /* Traditional and volatile asm instructions must be considered to use
2785 and clobber all hard registers, all pseudo-registers and all of
2786 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2788 Consider for instance a volatile asm that changes the fpu rounding
2789 mode. An insn should not be moved across this even if it only uses
2790 pseudo-regs because it might give an incorrectly rounded result. */
2791 if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2792 && !DEBUG_INSN_P (insn))
2793 reg_pending_barrier = TRUE_BARRIER;
2795 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2796         We cannot just fall through here since then we would be confused
2797         by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2798         a traditional asm, unlike its normal usage.  */
2800 if (code == ASM_OPERANDS)
2802 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2803 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2805 if (cslr_p && sched_deps_info->finish_rhs)
2806 sched_deps_info->finish_rhs ();
2808 return;
2810 break;
2813 case PRE_DEC:
2814 case POST_DEC:
2815 case PRE_INC:
2816 case POST_INC:
2817 /* These both read and modify the result. We must handle them as writes
2818 to get proper dependencies for following instructions. We must handle
2819 them as reads to get proper dependencies from this to previous
2820 instructions. Thus we need to pass them to both sched_analyze_1
2821 and sched_analyze_2. We must call sched_analyze_2 first in order
2822 to get the proper antecedent for the read. */
2823 sched_analyze_2 (deps, XEXP (x, 0), insn);
2824 sched_analyze_1 (deps, x, insn);
2826 if (cslr_p && sched_deps_info->finish_rhs)
2827 sched_deps_info->finish_rhs ();
2829 return;
2831 case POST_MODIFY:
2832 case PRE_MODIFY:
2833 /* op0 = op0 + op1 */
2834 sched_analyze_2 (deps, XEXP (x, 0), insn);
2835 sched_analyze_2 (deps, XEXP (x, 1), insn);
2836 sched_analyze_1 (deps, x, insn);
2838 if (cslr_p && sched_deps_info->finish_rhs)
2839 sched_deps_info->finish_rhs ();
2841 return;
2843 default:
2844 break;
2847 /* Other cases: walk the insn. */
2848 fmt = GET_RTX_FORMAT (code);
2849 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2851 if (fmt[i] == 'e')
2852 sched_analyze_2 (deps, XEXP (x, i), insn);
2853 else if (fmt[i] == 'E')
2854 for (j = 0; j < XVECLEN (x, i); j++)
2855 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2858 if (cslr_p && sched_deps_info->finish_rhs)
2859 sched_deps_info->finish_rhs ();
2862 /* Try to group two fusible insns together to prevent the scheduler
2863    from scheduling them apart.  */
2865 static void
2866 sched_macro_fuse_insns (rtx_insn *insn)
2868 rtx_insn *prev;
2870 if (any_condjump_p (insn))
2872 unsigned int condreg1, condreg2;
2873 rtx cc_reg_1;
2874 targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2875 cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2876 prev = prev_nonnote_nondebug_insn (insn);
2877 if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
2878 || !prev
2879 || !modified_in_p (cc_reg_1, prev))
2880 return;
2882 else
2884 rtx insn_set = single_set (insn);
2886 prev = prev_nonnote_nondebug_insn (insn);
2887 if (!prev
2888 || !insn_set
2889 || !single_set (prev))
2890 return;
2894 if (targetm.sched.macro_fusion_pair_p (prev, insn))
2895 SCHED_GROUP_P (insn) = 1;
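     /* SCHED_GROUP_P makes the scheduler keep INSN glued to PREV, so the
        fused pair (e.g. compare + conditional branch) is issued back to
        back.  */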
2899 /* Get the implicit reg pending clobbers for INSN and save them in TEMP. */
2900 void
2901 get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn)
2903 extract_insn (insn);
2904 preprocess_constraints (insn);
2905 alternative_mask preferred = get_preferred_alternatives (insn);
2906 ira_implicitly_set_insn_hard_regs (temp, preferred);
2907 AND_COMPL_HARD_REG_SET (*temp, ira_no_alloc_regs);
2910 /* Analyze an INSN with pattern X to find all dependencies. */
2911 static void
2912 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
2914 RTX_CODE code = GET_CODE (x);
2915 rtx link;
2916 unsigned i;
2917 reg_set_iterator rsi;
2919 if (! reload_completed)
2921 HARD_REG_SET temp;
2922 get_implicit_reg_pending_clobbers (&temp, insn);
2923 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2926 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2927 && code == SET);
2929 /* Group compare and branch insns for macro-fusion. */
2930 if (targetm.sched.macro_fusion_p
2931 && targetm.sched.macro_fusion_p ())
2932 sched_macro_fuse_insns (insn);
2934 if (may_trap_p (x))
2935 /* Avoid moving trapping instructions across function calls that might
2936 not always return. */
2937 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2938 1, REG_DEP_ANTI, true);
2940 /* We must avoid creating a situation in which two successors of the
2941 current block have different unwind info after scheduling. If at any
2942 point the two paths re-join this leads to incorrect unwind info. */
2943 /* ??? There are certain situations involving a forced frame pointer in
2944 which, with extra effort, we could fix up the unwind info at a later
2945 CFG join. However, it seems better to notice these cases earlier
2946 during prologue generation and avoid marking the frame pointer setup
2947 as frame-related at all. */
2948 if (RTX_FRAME_RELATED_P (insn))
2950 /* Make sure prologue insn is scheduled before next jump. */
2951 deps->sched_before_next_jump
2952 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2954 /* Make sure epilogue insn is scheduled after preceding jumps. */
2955 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2956 true);
2959 if (code == COND_EXEC)
2961 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2963 /* ??? Should be recording conditions so we reduce the number of
2964 false dependencies. */
2965 x = COND_EXEC_CODE (x);
2966 code = GET_CODE (x);
2968 if (code == SET || code == CLOBBER)
2970 sched_analyze_1 (deps, x, insn);
2972 /* Bare clobber insns are used for letting life analysis, reg-stack
2973 and others know that a value is dead. Depend on the last call
2974 instruction so that reg-stack won't get confused. */
2975 if (code == CLOBBER)
2976 add_dependence_list (insn, deps->last_function_call, 1,
2977 REG_DEP_OUTPUT, true);
2979 else if (code == PARALLEL)
2981 for (i = XVECLEN (x, 0); i--;)
2983 rtx sub = XVECEXP (x, 0, i);
2984 code = GET_CODE (sub);
2986 if (code == COND_EXEC)
2988 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2989 sub = COND_EXEC_CODE (sub);
2990 code = GET_CODE (sub);
2992 if (code == SET || code == CLOBBER)
2993 sched_analyze_1 (deps, sub, insn);
2994 else
2995 sched_analyze_2 (deps, sub, insn);
2998 else
2999 sched_analyze_2 (deps, x, insn);
3001 /* Mark registers CLOBBERED or used by called function. */
3002 if (CALL_P (insn))
3004 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
3006 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
3007 sched_analyze_1 (deps, XEXP (link, 0), insn);
3008 else if (GET_CODE (XEXP (link, 0)) != SET)
3009 sched_analyze_2 (deps, XEXP (link, 0), insn);
3011       /* Don't schedule anything after a tail call; a tail call needs
3012          to use at least all call-saved registers.  */
3013 if (SIBLING_CALL_P (insn))
3014 reg_pending_barrier = TRUE_BARRIER;
3015 else if (find_reg_note (insn, REG_SETJMP, NULL))
3016 reg_pending_barrier = MOVE_BARRIER;
3019 if (JUMP_P (insn))
3021 rtx next;
3022 next = next_nonnote_nondebug_insn (insn);
3023 if (next && BARRIER_P (next))
3024 reg_pending_barrier = MOVE_BARRIER;
3025 else
3027 rtx_insn_list *pending;
3028 rtx_expr_list *pending_mem;
3030 if (sched_deps_info->compute_jump_reg_dependencies)
3032 (*sched_deps_info->compute_jump_reg_dependencies)
3033 (insn, reg_pending_control_uses);
3035 /* Make latency of jump equal to 0 by using anti-dependence. */
3036 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3038 struct deps_reg *reg_last = &deps->reg_last[i];
3039 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3040 false);
3041 add_dependence_list (insn, reg_last->implicit_sets,
3042 0, REG_DEP_ANTI, false);
3043 add_dependence_list (insn, reg_last->clobbers, 0,
3044 REG_DEP_ANTI, false);
3048 /* All memory writes and volatile reads must happen before the
3049 jump. Non-volatile reads must happen before the jump iff
3050 the result is needed by the above register used mask. */
3052 pending = deps->pending_write_insns;
3053 pending_mem = deps->pending_write_mems;
3054 while (pending)
3056 if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3057 add_dependence (insn, pending->insn (),
3058 REG_DEP_OUTPUT);
3059 pending = pending->next ();
3060 pending_mem = pending_mem->next ();
3063 pending = deps->pending_read_insns;
3064 pending_mem = deps->pending_read_mems;
3065 while (pending)
3067 if (MEM_VOLATILE_P (pending_mem->element ())
3068 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3069 add_dependence (insn, pending->insn (),
3070 REG_DEP_OUTPUT);
3071 pending = pending->next ();
3072 pending_mem = pending_mem->next ();
3075 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
3076 REG_DEP_ANTI, true);
3077 add_dependence_list (insn, deps->pending_jump_insns, 1,
3078 REG_DEP_ANTI, true);
3082 /* If this instruction can throw an exception, then moving it changes
3083 where block boundaries fall. This is mighty confusing elsewhere.
3084 Therefore, prevent such an instruction from being moved. Same for
3085 non-jump instructions that define block boundaries.
3086 ??? Unclear whether this is still necessary in EBB mode. If not,
3087 add_branch_dependences should be adjusted for RGN mode instead. */
3088 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3089 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3090 reg_pending_barrier = MOVE_BARRIER;
3092 if (sched_pressure != SCHED_PRESSURE_NONE)
3094 setup_insn_reg_uses (deps, insn);
3095 init_insn_reg_pressure_info (insn);
3098 /* Add register dependencies for insn. */
3099 if (DEBUG_INSN_P (insn))
3101 rtx_insn *prev = deps->last_debug_insn;
3102 rtx u;
3104 if (!deps->readonly)
3105 deps->last_debug_insn = insn;
3107 if (prev)
3108 add_dependence (insn, prev, REG_DEP_ANTI);
3110 add_dependence_list (insn, deps->last_function_call, 1,
3111 REG_DEP_ANTI, false);
3113 if (!sel_sched_p ())
3114 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
3115 add_dependence (insn, as_a <rtx_insn *> (XEXP (u, 0)), REG_DEP_ANTI);
3117 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3119 struct deps_reg *reg_last = &deps->reg_last[i];
3120 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3121 /* There's no point in making REG_DEP_CONTROL dependencies for
3122 debug insns. */
3123 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3124 false);
3126 if (!deps->readonly)
3127 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3129 CLEAR_REG_SET (reg_pending_uses);
3131 /* Quite often, a debug insn will refer to stuff in the
3132 previous instruction, but the reason we want this
3133 dependency here is to make sure the scheduler doesn't
3134 gratuitously move a debug insn ahead. This could dirty
3135 DF flags and cause additional analysis that wouldn't have
3136 occurred in compilation without debug insns, and such
3137 additional analysis can modify the generated code. */
3138 prev = PREV_INSN (insn);
3140 if (prev && NONDEBUG_INSN_P (prev))
3141 add_dependence (insn, prev, REG_DEP_ANTI);
3143 else
3145 regset_head set_or_clobbered;
3147 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3149 struct deps_reg *reg_last = &deps->reg_last[i];
3150 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3151 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3152 false);
3153 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3154 false);
3156 if (!deps->readonly)
3158 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3159 reg_last->uses_length++;
3163 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3164 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3166 struct deps_reg *reg_last = &deps->reg_last[i];
3167 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3168 add_dependence_list (insn, reg_last->implicit_sets, 0,
3169 REG_DEP_ANTI, false);
3170 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3171 false);
3173 if (!deps->readonly)
3175 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3176 reg_last->uses_length++;
3180 if (targetm.sched.exposed_pipeline)
3182 INIT_REG_SET (&set_or_clobbered);
3183 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3184 reg_pending_sets);
3185 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3187 struct deps_reg *reg_last = &deps->reg_last[i];
3188 rtx list;
3189 for (list = reg_last->uses; list; list = XEXP (list, 1))
3191 rtx other = XEXP (list, 0);
3192 if (INSN_CACHED_COND (other) != const_true_rtx
3193 && refers_to_regno_p (i, INSN_CACHED_COND (other)))
3194 INSN_CACHED_COND (other) = const_true_rtx;
3199 /* If the current insn is conditional, we can't free any
3200 of the lists. */
3201 if (sched_has_condition_p (insn))
3203 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3205 struct deps_reg *reg_last = &deps->reg_last[i];
3206 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3207 false);
3208 add_dependence_list (insn, reg_last->implicit_sets, 0,
3209 REG_DEP_ANTI, false);
3210 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3211 false);
3212 add_dependence_list (insn, reg_last->control_uses, 0,
3213 REG_DEP_CONTROL, false);
3215 if (!deps->readonly)
3217 reg_last->clobbers
3218 = alloc_INSN_LIST (insn, reg_last->clobbers);
3219 reg_last->clobbers_length++;
3222 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3224 struct deps_reg *reg_last = &deps->reg_last[i];
3225 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3226 false);
3227 add_dependence_list (insn, reg_last->implicit_sets, 0,
3228 REG_DEP_ANTI, false);
3229 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3230 false);
3231 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3232 false);
3233 add_dependence_list (insn, reg_last->control_uses, 0,
3234 REG_DEP_CONTROL, false);
3236 if (!deps->readonly)
3237 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3240 else
3242 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3244 struct deps_reg *reg_last = &deps->reg_last[i];
3245 if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
3246 || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
3248 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3249 REG_DEP_OUTPUT, false);
3250 add_dependence_list_and_free (deps, insn,
3251 &reg_last->implicit_sets, 0,
3252 REG_DEP_ANTI, false);
3253 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3254 REG_DEP_ANTI, false);
3255 add_dependence_list_and_free (deps, insn,
3256 &reg_last->control_uses, 0,
3257 REG_DEP_ANTI, false);
3258 add_dependence_list_and_free (deps, insn,
3259 &reg_last->clobbers, 0,
3260 REG_DEP_OUTPUT, false);
3262 if (!deps->readonly)
3264 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3265 reg_last->clobbers_length = 0;
3266 reg_last->uses_length = 0;
3269 else
3271 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3272 false);
3273 add_dependence_list (insn, reg_last->implicit_sets, 0,
3274 REG_DEP_ANTI, false);
3275 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3276 false);
3277 add_dependence_list (insn, reg_last->control_uses, 0,
3278 REG_DEP_CONTROL, false);
3281 if (!deps->readonly)
3283 reg_last->clobbers_length++;
3284 reg_last->clobbers
3285 = alloc_INSN_LIST (insn, reg_last->clobbers);
3288 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3290 struct deps_reg *reg_last = &deps->reg_last[i];
3292 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3293 REG_DEP_OUTPUT, false);
3294 add_dependence_list_and_free (deps, insn,
3295 &reg_last->implicit_sets,
3296 0, REG_DEP_ANTI, false);
3297 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3298 REG_DEP_OUTPUT, false);
3299 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3300 REG_DEP_ANTI, false);
3301 add_dependence_list (insn, reg_last->control_uses, 0,
3302 REG_DEP_CONTROL, false);
3304 if (!deps->readonly)
3306 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3307 reg_last->uses_length = 0;
3308 reg_last->clobbers_length = 0;
3312 if (!deps->readonly)
3314 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3316 struct deps_reg *reg_last = &deps->reg_last[i];
3317 reg_last->control_uses
3318 = alloc_INSN_LIST (insn, reg_last->control_uses);
3323 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3324 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3326 struct deps_reg *reg_last = &deps->reg_last[i];
3327 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3328 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3329 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3330 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3331 false);
3333 if (!deps->readonly)
3334 reg_last->implicit_sets
3335 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3338 if (!deps->readonly)
3340 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3341 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3342 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3343 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3344 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3345 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3346 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3348 /* Set up the pending barrier found. */
3349 deps->last_reg_pending_barrier = reg_pending_barrier;
3352 CLEAR_REG_SET (reg_pending_uses);
3353 CLEAR_REG_SET (reg_pending_clobbers);
3354 CLEAR_REG_SET (reg_pending_sets);
3355 CLEAR_REG_SET (reg_pending_control_uses);
3356 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3357 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3359 /* Add dependencies if a scheduling barrier was found. */
3360 if (reg_pending_barrier)
3362       /* In the case of a barrier, most of the added dependencies are not
3363          real, so we use anti-dependences here.  */
3364 if (sched_has_condition_p (insn))
3366 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3368 struct deps_reg *reg_last = &deps->reg_last[i];
3369 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3370 true);
3371 add_dependence_list (insn, reg_last->sets, 0,
3372 reg_pending_barrier == TRUE_BARRIER
3373 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3374 add_dependence_list (insn, reg_last->implicit_sets, 0,
3375 REG_DEP_ANTI, true);
3376 add_dependence_list (insn, reg_last->clobbers, 0,
3377 reg_pending_barrier == TRUE_BARRIER
3378 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3381 else
3383 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3385 struct deps_reg *reg_last = &deps->reg_last[i];
3386 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3387 REG_DEP_ANTI, true);
3388 add_dependence_list_and_free (deps, insn,
3389 &reg_last->control_uses, 0,
3390 REG_DEP_CONTROL, true);
3391 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3392 reg_pending_barrier == TRUE_BARRIER
3393 ? REG_DEP_TRUE : REG_DEP_ANTI,
3394 true);
3395 add_dependence_list_and_free (deps, insn,
3396 &reg_last->implicit_sets, 0,
3397 REG_DEP_ANTI, true);
3398 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3399 reg_pending_barrier == TRUE_BARRIER
3400 ? REG_DEP_TRUE : REG_DEP_ANTI,
3401 true);
3403 if (!deps->readonly)
3405 reg_last->uses_length = 0;
3406 reg_last->clobbers_length = 0;
3411 if (!deps->readonly)
3412 for (i = 0; i < (unsigned)deps->max_reg; i++)
3414 struct deps_reg *reg_last = &deps->reg_last[i];
3415 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3416 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3419 /* Don't flush pending lists on speculative checks for
3420 selective scheduling. */
3421 if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
3422 flush_pending_lists (deps, insn, true, true);
3424 reg_pending_barrier = NOT_A_BARRIER;
3427 /* If a post-call group is still open, see if it should remain so.
3428 This insn must be a simple move of a hard reg to a pseudo or
3429 vice-versa.
3431 We must avoid moving these insns for correctness on targets
3432 with small register classes, and for special registers like
3433 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3434 hard regs for all targets. */
3436 if (deps->in_post_call_group_p)
3438 rtx tmp, set = single_set (insn);
3439 int src_regno, dest_regno;
3441 if (set == NULL)
3443 if (DEBUG_INSN_P (insn))
3444 /* We don't want to mark debug insns as part of the same
3445 sched group. We know they really aren't, but if we use
3446 debug insns to tell that a call group is over, we'll
3447 get different code if debug insns are not there and
3448 instructions that follow seem like they should be part
3449 of the call group.
3451 Also, if we did, chain_to_prev_insn would move the
3452 deps of the debug insn to the call insn, modifying
3453 non-debug post-dependency counts of the debug insn
3454 dependencies and otherwise messing with the scheduling
3455 order.
3457 Instead, let such debug insns be scheduled freely, but
3458 keep the call group open in case there are insns that
3459 should be part of it afterwards. Since we grant debug
3460 insns higher priority than even sched group insns, it
3461 will all turn out all right. */
3462 goto debug_dont_end_call_group;
3463 else
3464 goto end_call_group;
3467 tmp = SET_DEST (set);
3468 if (GET_CODE (tmp) == SUBREG)
3469 tmp = SUBREG_REG (tmp);
3470 if (REG_P (tmp))
3471 dest_regno = REGNO (tmp);
3472 else
3473 goto end_call_group;
3475 tmp = SET_SRC (set);
3476 if (GET_CODE (tmp) == SUBREG)
3477 tmp = SUBREG_REG (tmp);
3478 if ((GET_CODE (tmp) == PLUS
3479 || GET_CODE (tmp) == MINUS)
3480 && REG_P (XEXP (tmp, 0))
3481 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3482 && dest_regno == STACK_POINTER_REGNUM)
3483 src_regno = STACK_POINTER_REGNUM;
3484 else if (REG_P (tmp))
3485 src_regno = REGNO (tmp);
3486 else
3487 goto end_call_group;
3489 if (src_regno < FIRST_PSEUDO_REGISTER
3490 || dest_regno < FIRST_PSEUDO_REGISTER)
3492 if (!deps->readonly
3493 && deps->in_post_call_group_p == post_call_initial)
3494 deps->in_post_call_group_p = post_call;
3496 if (!sel_sched_p () || sched_emulate_haifa_p)
3498 SCHED_GROUP_P (insn) = 1;
3499 CANT_MOVE (insn) = 1;
3502 else
3504 end_call_group:
3505 if (!deps->readonly)
3506 deps->in_post_call_group_p = not_post_call;
3510 debug_dont_end_call_group:
3511 if ((current_sched_info->flags & DO_SPECULATION)
3512 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3513 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3514 be speculated. */
3516 if (sel_sched_p ())
3517 sel_mark_hard_insn (insn);
3518 else
3520 sd_iterator_def sd_it;
3521 dep_t dep;
3523 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3524 sd_iterator_cond (&sd_it, &dep);)
3525 change_spec_dep_to_hard (sd_it);
3529 /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
3530 honor their original ordering. */
3531 if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3533 if (deps->last_args_size)
3534 add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3535 if (!deps->readonly)
3536 deps->last_args_size = insn;
3540 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3541 longjmp, loop forever, ...). */
3542 /* FIXME: Why can't this function just use flags_from_decl_or_type and
3543 test for ECF_NORETURN? */
3544 static bool
3545 call_may_noreturn_p (rtx insn)
3547 rtx call;
3549 /* const or pure calls that aren't looping will always return. */
3550 if (RTL_CONST_OR_PURE_CALL_P (insn)
3551 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3552 return false;
3554 call = get_call_rtx_from (insn);
3555 if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3557 rtx symbol = XEXP (XEXP (call, 0), 0);
3558 if (SYMBOL_REF_DECL (symbol)
3559 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3561 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3562 == BUILT_IN_NORMAL)
3563 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3565 case BUILT_IN_BCMP:
3566 case BUILT_IN_BCOPY:
3567 case BUILT_IN_BZERO:
3568 case BUILT_IN_INDEX:
3569 case BUILT_IN_MEMCHR:
3570 case BUILT_IN_MEMCMP:
3571 case BUILT_IN_MEMCPY:
3572 case BUILT_IN_MEMMOVE:
3573 case BUILT_IN_MEMPCPY:
3574 case BUILT_IN_MEMSET:
3575 case BUILT_IN_RINDEX:
3576 case BUILT_IN_STPCPY:
3577 case BUILT_IN_STPNCPY:
3578 case BUILT_IN_STRCAT:
3579 case BUILT_IN_STRCHR:
3580 case BUILT_IN_STRCMP:
3581 case BUILT_IN_STRCPY:
3582 case BUILT_IN_STRCSPN:
3583 case BUILT_IN_STRLEN:
3584 case BUILT_IN_STRNCAT:
3585 case BUILT_IN_STRNCMP:
3586 case BUILT_IN_STRNCPY:
3587 case BUILT_IN_STRPBRK:
3588 case BUILT_IN_STRRCHR:
3589 case BUILT_IN_STRSPN:
3590 case BUILT_IN_STRSTR:
3591 /* Assume certain string/memory builtins always return. */
3592 return false;
3593 default:
3594 break;
3599 /* For all other calls assume that they might not always return. */
3600 return true;
3603 /* Return true if INSN should be made dependent on the previous instruction
3604 group, and if all INSN's dependencies should be moved to the first
3605 instruction of that group. */
3607 static bool
3608 chain_to_prev_insn_p (rtx insn)
3610 rtx prev, x;
3612 /* INSN forms a group with the previous instruction. */
3613 if (SCHED_GROUP_P (insn))
3614 return true;
3616 /* If the previous instruction clobbers a register R and this one sets
3617 part of R, the clobber was added specifically to help us track the
3618 liveness of R. There's no point scheduling the clobber and leaving
3619 INSN behind, especially if we move the clobber to another block. */
3620 prev = prev_nonnote_nondebug_insn (insn);
3621 if (prev
3622 && INSN_P (prev)
3623 && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3624 && GET_CODE (PATTERN (prev)) == CLOBBER)
3626 x = XEXP (PATTERN (prev), 0);
3627 if (set_of (x, insn))
3628 return true;
3631 return false;
3634 /* Analyze INSN with DEPS as a context. */
3635 void
3636 deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
3638 if (sched_deps_info->start_insn)
3639 sched_deps_info->start_insn (insn);
3641 /* Record the condition for this insn. */
3642 if (NONDEBUG_INSN_P (insn))
3644 rtx t;
3645 sched_get_condition_with_rev (insn, NULL);
3646 t = INSN_CACHED_COND (insn);
3647 INSN_COND_DEPS (insn) = NULL;
3648 if (reload_completed
3649 && (current_sched_info->flags & DO_PREDICATION)
3650 && COMPARISON_P (t)
3651 && REG_P (XEXP (t, 0))
3652 && CONSTANT_P (XEXP (t, 1)))
3654 unsigned int regno;
3655 int nregs;
3656 rtx_insn_list *cond_deps = NULL;
3657 t = XEXP (t, 0);
3658 regno = REGNO (t);
3659 nregs = hard_regno_nregs[regno][GET_MODE (t)];
3660 while (nregs-- > 0)
3662 struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3663 cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
3664 cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
3665 cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
3667 INSN_COND_DEPS (insn) = cond_deps;
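          /* INSN_COND_DEPS now records every pending set, clobber or
             implicit set of the hard registers making up INSN's condition,
             for use by the DO_PREDICATION machinery.  */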
3671 if (JUMP_P (insn))
3673 /* Make each JUMP_INSN (but not a speculative check)
3674 a scheduling barrier for memory references. */
3675 if (!deps->readonly
3676 && !(sel_sched_p ()
3677 && sel_insn_is_speculation_check (insn)))
3679 /* Keep the list a reasonable size. */
3680 if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
3681 flush_pending_lists (deps, insn, true, true);
3682 else
3683 deps->pending_jump_insns
3684 = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3687 /* For each insn which shouldn't cross a jump, add a dependence. */
3688 add_dependence_list_and_free (deps, insn,
3689 &deps->sched_before_next_jump, 1,
3690 REG_DEP_ANTI, true);
3692 sched_analyze_insn (deps, PATTERN (insn), insn);
3694 else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3696 sched_analyze_insn (deps, PATTERN (insn), insn);
3698 else if (CALL_P (insn))
3700 int i;
3702 CANT_MOVE (insn) = 1;
3704 if (find_reg_note (insn, REG_SETJMP, NULL))
3706 /* This is setjmp. Assume that all registers, not just
3707 hard registers, may be clobbered by this call. */
3708 reg_pending_barrier = MOVE_BARRIER;
3710 else
3712 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3713 /* A call may read and modify global register variables. */
3714 if (global_regs[i])
3716 SET_REGNO_REG_SET (reg_pending_sets, i);
3717 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3719 /* Other call-clobbered hard regs may be clobbered.
3720 Since we only have a choice between 'might be clobbered'
3721 and 'definitely not clobbered', we must include all
3722 partly call-clobbered registers here. */
3723 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3724 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3725 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3726           /* We don't know what set of fixed registers might be used
3727              by the function, but it is certain that the stack pointer
3728              is among them, so be conservative.  */
3729 else if (fixed_regs[i])
3730 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3731 /* The frame pointer is normally not used by the function
3732 itself, but by the debugger. */
3733 /* ??? MIPS o32 is an exception. It uses the frame pointer
3734 in the macro expansion of jal but does not represent this
3735 fact in the call_insn rtl. */
3736 else if (i == FRAME_POINTER_REGNUM
3737 || (i == HARD_FRAME_POINTER_REGNUM
3738 && (! reload_completed || frame_pointer_needed)))
3739 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3742 /* For each insn which shouldn't cross a call, add a dependence
3743 between that insn and this call insn. */
3744 add_dependence_list_and_free (deps, insn,
3745 &deps->sched_before_next_call, 1,
3746 REG_DEP_ANTI, true);
3748 sched_analyze_insn (deps, PATTERN (insn), insn);
3750 /* If CALL would be in a sched group, then this will violate
3751 convention that sched group insns have dependencies only on the
3752 previous instruction.
3754 Of course one can say: "Hey! What about head of the sched group?"
3755 And I will answer: "Basic principles (one dep per insn) are always
3756 the same." */
3757 gcc_assert (!SCHED_GROUP_P (insn));
3759 /* In the absence of interprocedural alias analysis, we must flush
3760 all pending reads and writes, and start new dependencies starting
3761 from here. But only flush writes for constant calls (which may
3762 be passed a pointer to something we haven't written yet). */
3763 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3765 if (!deps->readonly)
3767 /* Remember the last function call for limiting lifetimes. */
3768 free_INSN_LIST_list (&deps->last_function_call);
3769 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3771 if (call_may_noreturn_p (insn))
3773 /* Remember the last function call that might not always return
3774 normally for limiting moves of trapping insns. */
3775 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3776 deps->last_function_call_may_noreturn
3777 = alloc_INSN_LIST (insn, NULL_RTX);
3780 /* Before reload, begin a post-call group, so as to keep the
3781 lifetimes of hard registers correct. */
3782 if (! reload_completed)
3783 deps->in_post_call_group_p = post_call;
3787 if (sched_deps_info->use_cselib)
3788 cselib_process_insn (insn);
3790 if (sched_deps_info->finish_insn)
3791 sched_deps_info->finish_insn ();
3793 /* Fixup the dependencies in the sched group. */
3794 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3795 && chain_to_prev_insn_p (insn)
3796 && !sel_sched_p ())
3797 chain_to_prev_insn (insn);
3800 /* Initialize DEPS for the new block beginning with HEAD. */
3801 void
3802 deps_start_bb (struct deps_desc *deps, rtx_insn *head)
3804 gcc_assert (!deps->readonly);
3806 /* Before reload, if the previous block ended in a call, show that
3807 we are inside a post-call group, so as to keep the lifetimes of
3808 hard registers correct. */
3809 if (! reload_completed && !LABEL_P (head))
3811 rtx_insn *insn = prev_nonnote_nondebug_insn (head);
3813 if (insn && CALL_P (insn))
3814 deps->in_post_call_group_p = post_call_initial;
3818 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3819 dependencies for each insn. */
3820 void
3821 sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
3823 rtx_insn *insn;
3825 if (sched_deps_info->use_cselib)
3826 cselib_init (CSELIB_RECORD_MEMORY);
3828 deps_start_bb (deps, head);
3830 for (insn = head;; insn = NEXT_INSN (insn))
3833 if (INSN_P (insn))
3835 /* And initialize deps_lists. */
3836 sd_init_insn (insn);
3837 /* Clean up SCHED_GROUP_P which may be set by last
3838 scheduler pass. */
3839 if (SCHED_GROUP_P (insn))
3840 SCHED_GROUP_P (insn) = 0;
3843 deps_analyze_insn (deps, insn);
3845 if (insn == tail)
3847 if (sched_deps_info->use_cselib)
3848 cselib_finish ();
3849 return;
3852 gcc_unreachable ();
3855 /* Helper for sched_free_deps ().
3856 Delete INSN's (RESOLVED_P) backward dependencies. */
3857 static void
3858 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3860 sd_iterator_def sd_it;
3861 dep_t dep;
3862 sd_list_types_def types;
3864 if (resolved_p)
3865 types = SD_LIST_RES_BACK;
3866 else
3867 types = SD_LIST_BACK;
3869 for (sd_it = sd_iterator_start (insn, types);
3870 sd_iterator_cond (&sd_it, &dep);)
3872 dep_link_t link = *sd_it.linkp;
3873 dep_node_t node = DEP_LINK_NODE (link);
3874 deps_list_t back_list;
3875 deps_list_t forw_list;
3877 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3878 remove_from_deps_list (link, back_list);
3879 delete_dep_node (node);
3883 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3884 deps_lists. */
3885 void
3886 sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
3888 rtx_insn *insn;
3889 rtx_insn *next_tail = NEXT_INSN (tail);
3891 /* We make two passes since some insns may be scheduled before their
3892 dependencies are resolved. */
3893 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3894 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3896 /* Clear forward deps and leave the dep_nodes to the
3897 corresponding back_deps list. */
3898 if (resolved_p)
3899 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3900 else
3901 clear_deps_list (INSN_FORW_DEPS (insn));
3903 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3904 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3906 /* Clear resolved back deps together with its dep_nodes. */
3907 delete_dep_nodes_in_back_deps (insn, resolved_p);
3909 sd_finish_insn (insn);
3913 /* Initialize variables for region data dependence analysis.
3914 When LAZY_REG_LAST is true, do not allocate reg_last array
3915 of struct deps_desc immediately. */
3917 void
3918 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3920 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3922 deps->max_reg = max_reg;
3923 if (lazy_reg_last)
3924 deps->reg_last = NULL;
3925 else
3926 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3927 INIT_REG_SET (&deps->reg_last_in_use);
3929 deps->pending_read_insns = 0;
3930 deps->pending_read_mems = 0;
3931 deps->pending_write_insns = 0;
3932 deps->pending_write_mems = 0;
3933 deps->pending_jump_insns = 0;
3934 deps->pending_read_list_length = 0;
3935 deps->pending_write_list_length = 0;
3936 deps->pending_flush_length = 0;
3937 deps->last_pending_memory_flush = 0;
3938 deps->last_function_call = 0;
3939 deps->last_function_call_may_noreturn = 0;
3940 deps->sched_before_next_call = 0;
3941 deps->sched_before_next_jump = 0;
3942 deps->in_post_call_group_p = not_post_call;
3943 deps->last_debug_insn = 0;
3944 deps->last_args_size = 0;
3945 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3946 deps->readonly = 0;
3949 /* Initialize only the reg_last field of DEPS, which was not allocated
3950    before because we initialized DEPS lazily.  */
3951 void
3952 init_deps_reg_last (struct deps_desc *deps)
3954 gcc_assert (deps && deps->max_reg > 0);
3955 gcc_assert (deps->reg_last == NULL);
3957 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3961 /* Free insn lists found in DEPS. */
3963 void
3964 free_deps (struct deps_desc *deps)
3966 unsigned i;
3967 reg_set_iterator rsi;
3969 /* We set max_reg to 0 when this context was already freed. */
3970 if (deps->max_reg == 0)
3972 gcc_assert (deps->reg_last == NULL);
3973 return;
3975 deps->max_reg = 0;
3977 free_INSN_LIST_list (&deps->pending_read_insns);
3978 free_EXPR_LIST_list (&deps->pending_read_mems);
3979 free_INSN_LIST_list (&deps->pending_write_insns);
3980 free_EXPR_LIST_list (&deps->pending_write_mems);
3981 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3983 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3984 times. For a testcase with 42000 regs and 8000 small basic blocks,
3985 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3986 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3988 struct deps_reg *reg_last = &deps->reg_last[i];
3989 if (reg_last->uses)
3990 free_INSN_LIST_list (&reg_last->uses);
3991 if (reg_last->sets)
3992 free_INSN_LIST_list (&reg_last->sets);
3993 if (reg_last->implicit_sets)
3994 free_INSN_LIST_list (&reg_last->implicit_sets);
3995 if (reg_last->control_uses)
3996 free_INSN_LIST_list (&reg_last->control_uses);
3997 if (reg_last->clobbers)
3998 free_INSN_LIST_list (&reg_last->clobbers);
4000 CLEAR_REG_SET (&deps->reg_last_in_use);
4002 /* As we initialize reg_last lazily, it is possible that we didn't allocate
4003 it at all. */
4004 free (deps->reg_last);
4005 deps->reg_last = NULL;
4007 deps = NULL;
4010 /* Remove INSN from the dependence context DEPS. */
4011 void
4012 remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
4014 int removed;
4015 unsigned i;
4016 reg_set_iterator rsi;
4018 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
4019 &deps->pending_read_mems);
4020 if (!DEBUG_INSN_P (insn))
4021 deps->pending_read_list_length -= removed;
4022 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
4023 &deps->pending_write_mems);
4024 deps->pending_write_list_length -= removed;
4026 removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
4027 deps->pending_flush_length -= removed;
4028 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
4029 deps->pending_flush_length -= removed;
4031 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
4033 struct deps_reg *reg_last = &deps->reg_last[i];
4034 if (reg_last->uses)
4035 remove_from_dependence_list (insn, &reg_last->uses);
4036 if (reg_last->sets)
4037 remove_from_dependence_list (insn, &reg_last->sets);
4038 if (reg_last->implicit_sets)
4039 remove_from_dependence_list (insn, &reg_last->implicit_sets);
4040 if (reg_last->clobbers)
4041 remove_from_dependence_list (insn, &reg_last->clobbers);
4042 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
4043 && !reg_last->clobbers)
4044 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
4047 if (CALL_P (insn))
4049 remove_from_dependence_list (insn, &deps->last_function_call);
4050 remove_from_dependence_list (insn,
4051 &deps->last_function_call_may_noreturn);
4053 remove_from_dependence_list (insn, &deps->sched_before_next_call);
4056 /* Init deps data vector. */
4057 static void
4058 init_deps_data_vector (void)
4060 int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
4061 if (reserve > 0 && ! h_d_i_d.space (reserve))
4062 h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
4065 /* If it is profitable to use them, initialize or extend (depending on
4066 GLOBAL_P) dependency data. */
4067 void
4068 sched_deps_init (bool global_p)
4070 /* Average number of insns in the basic block.
4071 '+ 1' is used to make it nonzero. */
4072 int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
4074 init_deps_data_vector ();
4076 /* We use another caching mechanism for selective scheduling, so
4077 we don't use this one. */
4078 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
4080 /* ?!? We could save some memory by computing a per-region luid mapping
4081 which could reduce both the number of vectors in the cache and the
4082 size of each vector. Instead we just avoid the cache entirely unless
4083 the average number of instructions in a basic block is very high. See
4084 the comment before the declaration of true_dependency_cache for
4085 what we consider "very high". */
4086 cache_size = 0;
4087 extend_dependency_caches (sched_max_luid, true);
4090 if (global_p)
4092 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
4093 /* Allocate lists for one block at a time. */
4094 insns_in_block);
4095 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
4096 /* Allocate nodes for one block at a time.
4097 We assume that average insn has
4098 5 producers. */
4099 5 * insns_in_block);
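/* A worked example of the heuristic above, for a non-selective global
   initialization (block counts made up): with sched_max_luid == 4096 and
   4 basic blocks, insns_in_block is 4096 / 4 + 1 == 1025 > 500, so the
   per-LUID bitmap caches are created; with 64 blocks it is only 65, and
   the caches are skipped to save memory.  */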
4104 /* Create or extend (depending on CREATE_P) dependency caches to
4105 size N. */
4106 void
4107 extend_dependency_caches (int n, bool create_p)
4109 if (create_p || true_dependency_cache)
4111 int i, luid = cache_size + n;
4113 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4114 luid);
4115 output_dependency_cache = XRESIZEVEC (bitmap_head,
4116 output_dependency_cache, luid);
4117 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4118 luid);
4119 control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4120 luid);
4122 if (current_sched_info->flags & DO_SPECULATION)
4123 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4124 luid);
4126 for (i = cache_size; i < luid; i++)
4128 bitmap_initialize (&true_dependency_cache[i], 0);
4129 bitmap_initialize (&output_dependency_cache[i], 0);
4130 bitmap_initialize (&anti_dependency_cache[i], 0);
4131 bitmap_initialize (&control_dependency_cache[i], 0);
4133 if (current_sched_info->flags & DO_SPECULATION)
4134 bitmap_initialize (&spec_dependency_cache[i], 0);
4136 cache_size = luid;
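/* A rough sketch of how the caches grown here are consulted elsewhere in
   this file (see ask_dependency_caches): a dependence from PRO to CON is
   remembered per consumer LUID, roughly

       bitmap_set_bit (&true_dependency_cache[INSN_LUID (con)],
                       INSN_LUID (pro));
       ...
       if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
                         INSN_LUID (pro)))
         ... a true dependence was already recorded ...

   which is why the arrays are indexed and sized by instruction LUID.  */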
4140 /* Finalize dependency information for the whole function. */
4141 void
4142 sched_deps_finish (void)
4144 gcc_assert (deps_pools_are_empty_p ());
4145 free_alloc_pool_if_empty (&dn_pool);
4146 free_alloc_pool_if_empty (&dl_pool);
4147 gcc_assert (dn_pool == NULL && dl_pool == NULL);
4149 h_d_i_d.release ();
4152 if (true_dependency_cache)
4154 int i;
4156 for (i = 0; i < cache_size; i++)
4158 bitmap_clear (&true_dependency_cache[i]);
4159 bitmap_clear (&output_dependency_cache[i]);
4160 bitmap_clear (&anti_dependency_cache[i]);
4161 bitmap_clear (&control_dependency_cache[i]);
4163 if (sched_deps_info->generate_spec_deps)
4164 bitmap_clear (&spec_dependency_cache[i]);
4166 free (true_dependency_cache);
4167 true_dependency_cache = NULL;
4168 free (output_dependency_cache);
4169 output_dependency_cache = NULL;
4170 free (anti_dependency_cache);
4171 anti_dependency_cache = NULL;
4172 free (control_dependency_cache);
4173 control_dependency_cache = NULL;
4175 if (sched_deps_info->generate_spec_deps)
4177 free (spec_dependency_cache);
4178 spec_dependency_cache = NULL;

 /* Reset cache_size only now, after the per-LUID bitmaps above have been
    cleared and the cache arrays freed, so that the clearing loop actually
    runs.  */
 cache_size = 0;
4184 /* Initialize some global variables needed by the dependency analysis
4185 code. */
4187 void
4188 init_deps_global (void)
4190 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4191 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
4192 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4193 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4194 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4195 reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4196 reg_pending_barrier = NOT_A_BARRIER;
4198 if (!sel_sched_p () || sched_emulate_haifa_p)
4200 sched_deps_info->start_insn = haifa_start_insn;
4201 sched_deps_info->finish_insn = haifa_finish_insn;
4203 sched_deps_info->note_reg_set = haifa_note_reg_set;
4204 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4205 sched_deps_info->note_reg_use = haifa_note_reg_use;
4207 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4208 sched_deps_info->note_dep = haifa_note_dep;
4212 /* Free everything used by the dependency analysis code. */
4214 void
4215 finish_deps_global (void)
4217 FREE_REG_SET (reg_pending_sets);
4218 FREE_REG_SET (reg_pending_clobbers);
4219 FREE_REG_SET (reg_pending_uses);
4220 FREE_REG_SET (reg_pending_control_uses);
4223 /* Estimate the weakness of dependence between MEM1 and MEM2. */
4224 dw_t
4225 estimate_dep_weak (rtx mem1, rtx mem2)
4227 rtx r1, r2;
4229 if (mem1 == mem2)
4230 /* MEMs are the same - don't speculate. */
4231 return MIN_DEP_WEAK;
4233 r1 = XEXP (mem1, 0);
4234 r2 = XEXP (mem2, 0);
4236 if (r1 == r2
4237 || (REG_P (r1) && REG_P (r2)
4238 && REGNO (r1) == REGNO (r2)))
4239 /* Again, MEMs are the same. */
4240 return MIN_DEP_WEAK;
4241 else if ((REG_P (r1) && !REG_P (r2))
4242 || (!REG_P (r1) && REG_P (r2)))
4243 /* Different addressing modes - reason to be more speculative
4244 than usual. */
4245 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4246 else
4247 /* We can't say anything about the dependence. */
4248 return UNCERTAIN_DEP_WEAK;
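/* A sketch of typical use (this mirrors ds_full_merge below): the
   estimate feeds directly into the BEGIN_DATA weakness of a speculative
   status.

       dw_t dw = estimate_dep_weak (mem1, mem2);
       ds = set_dep_weak (ds, BEGIN_DATA, dw);

   The numeric values of MIN_DEP_WEAK, UNCERTAIN_DEP_WEAK and NO_DEP_WEAK
   come from sched-int.h.  */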
4251 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4252 This function can handle same INSN and ELEM (INSN == ELEM).
4253 It is a convenience wrapper. */
4254 static void
4255 add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
4257 ds_t ds;
4258 bool internal;
4260 if (dep_type == REG_DEP_TRUE)
4261 ds = DEP_TRUE;
4262 else if (dep_type == REG_DEP_OUTPUT)
4263 ds = DEP_OUTPUT;
4264 else if (dep_type == REG_DEP_CONTROL)
4265 ds = DEP_CONTROL;
4266 else
4268 gcc_assert (dep_type == REG_DEP_ANTI);
4269 ds = DEP_ANTI;
4272 /* When add_dependence is called from inside sched-deps.c, we expect
4273 cur_insn to be non-null. */
4274 internal = cur_insn != NULL;
4275 if (internal)
4276 gcc_assert (insn == cur_insn);
4277 else
4278 cur_insn = insn;
4280 note_dep (elem, ds);
4281 if (!internal)
4282 cur_insn = NULL;
4285 /* Return weakness of speculative type TYPE in the dep_status DS,
4286 without the range check, so dumping malformed input does not ICE. */
4287 static dw_t
4288 get_dep_weak_1 (ds_t ds, ds_t type)
4290 ds = ds & type;
4292 switch (type)
4294 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4295 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4296 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4297 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4298 default: gcc_unreachable ();
4301 return (dw_t) ds;
4304 /* Return weakness of speculative type TYPE in the dep_status DS. */
4305 dw_t
4306 get_dep_weak (ds_t ds, ds_t type)
4308 dw_t dw = get_dep_weak_1 (ds, type);
4310 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4311 return dw;
4314 /* Return the dep_status, which has the same parameters as DS, except for
4315 speculative type TYPE, which will have weakness DW. */
4316 ds_t
4317 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4319 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4321 ds &= ~type;
4322 switch (type)
4324 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4325 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4326 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4327 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4328 default: gcc_unreachable ();
4330 return ds;
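/* The two accessors above are inverses for a single speculation type; a
   minimal illustration with a symbolic weakness value:

       ds_t ds = set_dep_weak (0, BEGIN_CONTROL, MAX_DEP_WEAK / 2);
       gcc_assert (get_dep_weak (ds, BEGIN_CONTROL) == MAX_DEP_WEAK / 2);
   */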
4333 /* Return the join of two dep_statuses DS1 and DS2.
4334 If MAX_P is true then choose the greater probability,
4335 otherwise multiply probabilities.
4336 This function assumes that both DS1 and DS2 contain speculative bits. */
4337 static ds_t
4338 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4340 ds_t ds, t;
4342 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4344 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4346 t = FIRST_SPEC_TYPE;
4349 if ((ds1 & t) && !(ds2 & t))
4350 ds |= ds1 & t;
4351 else if (!(ds1 & t) && (ds2 & t))
4352 ds |= ds2 & t;
4353 else if ((ds1 & t) && (ds2 & t))
4355 dw_t dw1 = get_dep_weak (ds1, t);
4356 dw_t dw2 = get_dep_weak (ds2, t);
4357 ds_t dw;
4359 if (!max_p)
4361 dw = ((ds_t) dw1) * ((ds_t) dw2);
4362 dw /= MAX_DEP_WEAK;
4363 if (dw < MIN_DEP_WEAK)
4364 dw = MIN_DEP_WEAK;
4366 else
4368 if (dw1 >= dw2)
4369 dw = dw1;
4370 else
4371 dw = dw2;
4374 ds = set_dep_weak (ds, t, (dw_t) dw);
4377 if (t == LAST_SPEC_TYPE)
4378 break;
4379 t <<= SPEC_TYPE_SHIFT;
4381 while (1);
4383 return ds;
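/* A worked example of the !MAX_P path above (symbolic values): if DS1 and
   DS2 both carry BEGIN_DATA with weakness MAX_DEP_WEAK / 2, the merged
   weakness is

       (MAX_DEP_WEAK / 2) * (MAX_DEP_WEAK / 2) / MAX_DEP_WEAK
         == roughly MAX_DEP_WEAK / 4,

   i.e. independent success probabilities multiply (clamped below at
   MIN_DEP_WEAK), while with MAX_P the result would stay MAX_DEP_WEAK / 2.  */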
4386 /* Return the join of two dep_statuses DS1 and DS2.
4387 This function assumes that both DS1 and DS2 contain speculative bits. */
4388 ds_t
4389 ds_merge (ds_t ds1, ds_t ds2)
4391 return ds_merge_1 (ds1, ds2, false);
4394 /* Return the full join of dep_statuses DS and DS2; when both are
speculative and MEM1 is nonnull, MEM1 and MEM2 are used to re-estimate
the data-speculation weakness. */
4395 ds_t
4396 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4398 ds_t new_status = ds | ds2;
4400 if (new_status & SPECULATIVE)
4402 if ((ds && !(ds & SPECULATIVE))
4403 || (ds2 && !(ds2 & SPECULATIVE)))
4404 /* Then this dep can't be speculative. */
4405 new_status &= ~SPECULATIVE;
4406 else
4408 /* Both are speculative. Merging probabilities. */
4409 if (mem1)
4411 dw_t dw;
4413 dw = estimate_dep_weak (mem1, mem2);
4414 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4417 if (!ds)
4418 new_status = ds2;
4419 else if (!ds2)
4420 new_status = ds;
4421 else
4422 new_status = ds_merge (ds2, ds);
4426 return new_status;
4429 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
4430 probabilities. */
4431 ds_t
4432 ds_max_merge (ds_t ds1, ds_t ds2)
4434 if (ds1 == 0 && ds2 == 0)
4435 return 0;
4437 if (ds1 == 0 && ds2 != 0)
4438 return ds2;
4440 if (ds1 != 0 && ds2 == 0)
4441 return ds1;
4443 return ds_merge_1 (ds1, ds2, true);
4446 /* Return the probability of speculation success for the speculation
4447 status DS. */
4448 dw_t
4449 ds_weak (ds_t ds)
4451 ds_t res = 1, dt;
4452 int n = 0;
4454 dt = FIRST_SPEC_TYPE;
4457 if (ds & dt)
4459 res *= (ds_t) get_dep_weak (ds, dt);
4460 n++;
4463 if (dt == LAST_SPEC_TYPE)
4464 break;
4465 dt <<= SPEC_TYPE_SHIFT;
4467 while (1);
4469 gcc_assert (n);
4470 while (--n)
4471 res /= MAX_DEP_WEAK;
4473 if (res < MIN_DEP_WEAK)
4474 res = MIN_DEP_WEAK;
4476 gcc_assert (res <= MAX_DEP_WEAK);
4478 return (dw_t) res;
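/* A worked example for ds_weak (symbolic values): with two speculative
   types present, each of weakness MAX_DEP_WEAK / 2, the loop forms their
   product and then divides by MAX_DEP_WEAK n - 1 == 1 times, giving an
   overall success probability of roughly MAX_DEP_WEAK / 4; the result is
   clamped so it never drops below MIN_DEP_WEAK.  */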
4481 /* Return a dep status that contains all speculation types of DS. */
4482 ds_t
4483 ds_get_speculation_types (ds_t ds)
4485 if (ds & BEGIN_DATA)
4486 ds |= BEGIN_DATA;
4487 if (ds & BE_IN_DATA)
4488 ds |= BE_IN_DATA;
4489 if (ds & BEGIN_CONTROL)
4490 ds |= BEGIN_CONTROL;
4491 if (ds & BE_IN_CONTROL)
4492 ds |= BE_IN_CONTROL;
4494 return ds & SPECULATIVE;
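/* Note on the idiom above: each speculation type constant (BEGIN_DATA,
   BE_IN_DATA, ...) is a mask covering that type's whole weakness
   bit-field, so "ds & BEGIN_DATA" asks whether any weakness bits are set
   while "ds |= BEGIN_DATA" saturates the field; the final
   "& SPECULATIVE" therefore returns a full-width mask for every type
   present in DS.  */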
4497 /* Return a dep status that contains maximal weakness for each speculation
4498 type present in DS. */
4499 ds_t
4500 ds_get_max_dep_weak (ds_t ds)
4502 if (ds & BEGIN_DATA)
4503 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4504 if (ds & BE_IN_DATA)
4505 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4506 if (ds & BEGIN_CONTROL)
4507 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4508 if (ds & BE_IN_CONTROL)
4509 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4511 return ds;
4514 /* Dump information about the dependence status S. */
4515 static void
4516 dump_ds (FILE *f, ds_t s)
4518 fprintf (f, "{");
4520 if (s & BEGIN_DATA)
4521 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4522 if (s & BE_IN_DATA)
4523 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4524 if (s & BEGIN_CONTROL)
4525 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4526 if (s & BE_IN_CONTROL)
4527 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4529 if (s & HARD_DEP)
4530 fprintf (f, "HARD_DEP; ");
4532 if (s & DEP_TRUE)
4533 fprintf (f, "DEP_TRUE; ");
4534 if (s & DEP_OUTPUT)
4535 fprintf (f, "DEP_OUTPUT; ");
4536 if (s & DEP_ANTI)
4537 fprintf (f, "DEP_ANTI; ");
4538 if (s & DEP_CONTROL)
4539 fprintf (f, "DEP_CONTROL; ");
4541 fprintf (f, "}");
4544 DEBUG_FUNCTION void
4545 debug_ds (ds_t s)
4547 dump_ds (stderr, s);
4548 fprintf (stderr, "\n");
4551 #ifdef ENABLE_CHECKING
4552 /* Verify that dependence type and status are consistent.
4553 If RELAXED_P is true, then skip dep_weakness checks. */
4554 static void
4555 check_dep (dep_t dep, bool relaxed_p)
4557 enum reg_note dt = DEP_TYPE (dep);
4558 ds_t ds = DEP_STATUS (dep);
4560 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4562 if (!(current_sched_info->flags & USE_DEPS_LIST))
4564 gcc_assert (ds == 0);
4565 return;
4568 /* Check that dependence type contains the same bits as the status. */
4569 if (dt == REG_DEP_TRUE)
4570 gcc_assert (ds & DEP_TRUE);
4571 else if (dt == REG_DEP_OUTPUT)
4572 gcc_assert ((ds & DEP_OUTPUT)
4573 && !(ds & DEP_TRUE));
4574 else if (dt == REG_DEP_ANTI)
4575 gcc_assert ((ds & DEP_ANTI)
4576 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4577 else
4578 gcc_assert (dt == REG_DEP_CONTROL
4579 && (ds & DEP_CONTROL)
4580 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4582 /* HARD_DEP cannot appear in the dep_status of a link. */
4583 gcc_assert (!(ds & HARD_DEP));
4585 /* Check that dependence status is set correctly when speculation is not
4586 supported. */
4587 if (!sched_deps_info->generate_spec_deps)
4588 gcc_assert (!(ds & SPECULATIVE));
4589 else if (ds & SPECULATIVE)
4591 if (!relaxed_p)
4593 ds_t type = FIRST_SPEC_TYPE;
4595 /* Check that dependence weakness is in proper range. */
4598 if (ds & type)
4599 get_dep_weak (ds, type);
4601 if (type == LAST_SPEC_TYPE)
4602 break;
4603 type <<= SPEC_TYPE_SHIFT;
4605 while (1);
4608 if (ds & BEGIN_SPEC)
4610 /* Only true dependence can be data speculative. */
4611 if (ds & BEGIN_DATA)
4612 gcc_assert (ds & DEP_TRUE);
4614 /* Control dependencies in the insn scheduler are represented by
4615 anti-dependencies, therefore only anti dependence can be
4616 control speculative. */
4617 if (ds & BEGIN_CONTROL)
4618 gcc_assert (ds & DEP_ANTI);
4620 else
4622 /* Subsequent speculations should resolve true dependencies. */
4623 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4626 /* Check that true and anti dependencies can't have other speculative
4627 statuses. */
4628 if (ds & DEP_TRUE)
4629 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4630 /* An output dependence can't be speculative at all. */
4631 gcc_assert (!(ds & DEP_OUTPUT));
4632 if (ds & DEP_ANTI)
4633 gcc_assert (ds & BEGIN_CONTROL);
4636 #endif /* ENABLE_CHECKING */
4638 /* The following code discovers opportunities to switch a memory reference
4639 and an increment by modifying the address. We ensure that this is done
4640 only for dependencies that are only used to show a single register
4641 dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
4642 instruction involved is subject to only one dep that can cause a pattern
4643 change.
4645 When we discover a suitable dependency, we fill in the dep_replacement
4646 structure to show how to modify the memory reference. */
4648 /* Holds information about a pair of memory reference and register increment
4649 insns which depend on each other, but could possibly be interchanged. */
4650 struct mem_inc_info
4652 rtx_insn *inc_insn;
4653 rtx_insn *mem_insn;
4655 rtx *mem_loc;
4656 /* A register occurring in the memory address for which we wish to break
4657 the dependence. This must be identical to the destination register of
4658 the increment. */
4659 rtx mem_reg0;
4660 /* Any kind of index that is added to that register. */
4661 rtx mem_index;
4662 /* The constant offset used in the memory address. */
4663 HOST_WIDE_INT mem_constant;
4664 /* The constant added in the increment insn. Negated if the increment is
4665 after the memory address. */
4666 HOST_WIDE_INT inc_constant;
4667 /* The source register used in the increment. May be different from mem_reg0
4668 if the increment occurs before the memory address. */
4669 rtx inc_input;
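/* An illustrative shape of the transformation (register numbers, the
   constant 4 and most modes are made up or omitted for brevity): given
   the dependent pair

       (set (reg 100) (mem:SI (reg 101)))                  -- mem_insn
       (set (reg 101) (plus (reg 101) (const_int 4)))      -- inc_insn

   the anti-dependence can be made breakable: if the scheduler decides to
   move the increment first, the load's pattern is changed to use
   (mem:SI (plus (reg 101) (const_int -4))), so the register dependence no
   longer forces the original order.  */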
4672 /* Verify that the memory location described in MII can be replaced with
4673 one using NEW_ADDR. Return the new memory reference or NULL_RTX. The
4674 insn remains unchanged by this function. */
4676 static rtx
4677 attempt_change (struct mem_inc_info *mii, rtx new_addr)
4679 rtx mem = *mii->mem_loc;
4680 rtx new_mem;
4682 /* Jump through a lot of hoops to keep the attributes up to date. We
4683 do not want to call one of the change address variants that take
4684 an offset even though we know the offset in many cases. These
4685 assume you are changing where the address is pointing by the
4686 offset. */
4687 new_mem = replace_equiv_address_nv (mem, new_addr);
4688 if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4690 if (sched_verbose >= 5)
4691 fprintf (sched_dump, "validation failure\n");
4692 return NULL_RTX;
4695 /* Put back the old one. */
4696 validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4698 return new_mem;
4701 /* Return true if INSN has the form "a = b + c" where a and b are
4702 regs and c is a CONST_INT (a negative constant covers subtraction).
4703 Fill in information in MII about what is found.
4704 BEFORE_MEM indicates whether the increment is found before or after
4705 a corresponding memory reference. */
4707 static bool
4708 parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
4710 rtx pat = single_set (insn);
4711 rtx src, cst;
4712 bool regs_equal;
4714 if (RTX_FRAME_RELATED_P (insn) || !pat)
4715 return false;
4717 /* Result must be single reg. */
4718 if (!REG_P (SET_DEST (pat)))
4719 return false;
4721 if (GET_CODE (SET_SRC (pat)) != PLUS)
4722 return false;
4724 mii->inc_insn = insn;
4725 src = SET_SRC (pat);
4726 mii->inc_input = XEXP (src, 0);
4728 if (!REG_P (XEXP (src, 0)))
4729 return false;
4731 if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4732 return false;
4734 cst = XEXP (src, 1);
4735 if (!CONST_INT_P (cst))
4736 return false;
4737 mii->inc_constant = INTVAL (cst);
4739 regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4741 if (!before_mem)
4743 mii->inc_constant = -mii->inc_constant;
4744 if (!regs_equal)
4745 return false;
4748 if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
4750 /* Note that the sign has already been reversed for !before_mem. */
4751 #ifdef STACK_GROWS_DOWNWARD
4752 return mii->inc_constant > 0;
4753 #else
4754 return mii->inc_constant < 0;
4755 #endif
4757 return true;
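/* Examples of the forms accepted above, with hypothetical registers and
   mem_reg0 == r101:

       r101 = r101 + 8      accepted before or after the memory reference
       r101 = r102 + 8      accepted only when BEFORE_MEM (the source
                            register differs from the destination)
       r101 = r101 - 8      accepted, since RTL canonicalizes it as a PLUS
                            of a negative CONST_INT
       r101 = r101 + r103   rejected: the addend must be a CONST_INT.  */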
4760 /* Once a suitable mem reference has been found and the corresponding data
4761 in MII has been filled in, this function is called to find a suitable
4762 add or inc insn involving the register we found in the memory
4763 reference. */
4765 static bool
4766 find_inc (struct mem_inc_info *mii, bool backwards)
4768 sd_iterator_def sd_it;
4769 dep_t dep;
4771 sd_it = sd_iterator_start (mii->mem_insn,
4772 backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4773 while (sd_iterator_cond (&sd_it, &dep))
4775 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
4776 rtx_insn *pro = DEP_PRO (dep);
4777 rtx_insn *con = DEP_CON (dep);
4778 rtx_insn *inc_cand = backwards ? pro : con;
4779 if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4780 goto next;
4781 if (parse_add_or_inc (mii, inc_cand, backwards))
4783 struct dep_replacement *desc;
4784 df_ref def;
4785 rtx newaddr, newmem;
4787 if (sched_verbose >= 5)
4788 fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4789 INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4791 /* Need to ensure that none of the operands of the inc
4792 instruction are assigned to by the mem insn. */
4793 FOR_EACH_INSN_DEF (def, mii->mem_insn)
4794 if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4795 || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4797 if (sched_verbose >= 5)
4798 fprintf (sched_dump,
4799 "inc conflicts with store failure.\n");
4800 goto next;
4803 newaddr = mii->inc_input;
4804 if (mii->mem_index != NULL_RTX)
4805 newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4806 mii->mem_index);
4807 newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4808 mii->mem_constant + mii->inc_constant);
4809 newmem = attempt_change (mii, newaddr);
4810 if (newmem == NULL_RTX)
4811 goto next;
4812 if (sched_verbose >= 5)
4813 fprintf (sched_dump, "successful address replacement\n");
4814 desc = XCNEW (struct dep_replacement);
4815 DEP_REPLACE (dep) = desc;
4816 desc->loc = mii->mem_loc;
4817 desc->newval = newmem;
4818 desc->orig = *desc->loc;
4819 desc->insn = mii->mem_insn;
4820 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4821 INSN_SPEC_BACK_DEPS (con));
4822 if (backwards)
4824 FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
4825 add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4826 REG_DEP_TRUE);
4828 else
4830 FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
4831 add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4832 REG_DEP_ANTI);
4834 return true;
4836 next:
4837 sd_iterator_next (&sd_it);
4839 return false;
4842 /* A recursive function that walks ADDRESS_OF_X to find memory references
4843 which could be modified during scheduling. We call find_inc for each
4844 one we find that has a recognizable form. MII holds information about
4845 the pair of memory/increment instructions.
4846 We ensure that every instruction with a memory reference (which will be
4847 the location of the replacement) is assigned at most one breakable
4848 dependency. */
4850 static bool
4851 find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4853 rtx x = *address_of_x;
4854 enum rtx_code code = GET_CODE (x);
4855 const char *const fmt = GET_RTX_FORMAT (code);
4856 int i;
4858 if (code == MEM)
4860 rtx reg0 = XEXP (x, 0);
4862 mii->mem_loc = address_of_x;
4863 mii->mem_index = NULL_RTX;
4864 mii->mem_constant = 0;
4865 if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4867 mii->mem_constant = INTVAL (XEXP (reg0, 1));
4868 reg0 = XEXP (reg0, 0);
4870 if (GET_CODE (reg0) == PLUS)
4872 mii->mem_index = XEXP (reg0, 1);
4873 reg0 = XEXP (reg0, 0);
4875 if (REG_P (reg0))
4877 df_ref use;
4878 int occurrences = 0;
4880 /* Make sure this reg appears only once in this insn. Can't use
4881 count_occurrences since that only works for pseudos. */
4882 FOR_EACH_INSN_USE (use, mii->mem_insn)
4883 if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
4884 if (++occurrences > 1)
4886 if (sched_verbose >= 5)
4887 fprintf (sched_dump, "mem count failure\n");
4888 return false;
4891 mii->mem_reg0 = reg0;
4892 return find_inc (mii, true) || find_inc (mii, false);
4894 return false;
4897 if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4899 /* If REG occurs inside a MEM used in a bit-field reference,
4900 that is unacceptable. */
4901 return false;
4904 /* Time for some deep diving. */
4905 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4907 if (fmt[i] == 'e')
4909 if (find_mem (mii, &XEXP (x, i)))
4910 return true;
4912 else if (fmt[i] == 'E')
4914 int j;
4915 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4916 if (find_mem (mii, &XVECEXP (x, i, j)))
4917 return true;
4920 return false;
4924 /* Examine the instructions between HEAD and TAIL and try to find
4925 dependencies that can be broken by modifying one of the patterns. */
4927 void
4928 find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
4930 rtx_insn *insn, *next_tail = NEXT_INSN (tail);
4931 int success_in_block = 0;
4933 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4935 struct mem_inc_info mii;
4937 if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4938 continue;
4940 mii.mem_insn = insn;
4941 if (find_mem (&mii, &PATTERN (insn)))
4942 success_in_block++;
4944 if (success_in_block && sched_verbose >= 5)
4945 fprintf (sched_dump, "%d candidates for address modification found.\n",
4946 success_in_block);
4949 #endif /* INSN_SCHEDULING */