gcc/sched-deps.c
/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992-2015 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "ira-int.h"
#include "insn-attr.h"
#include "cfgbuild.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"

#ifdef INSN_SCHEDULING

/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
vec<haifa_deps_insn_data_def>
    h_d_i_d = vNULL;

/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  if (ds & DEP_CONTROL)
    return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}

/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    case REG_DEP_CONTROL:
      return DEP_CONTROL;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}
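/* Note that ds_to_dk tests the bits in the order DEP_TRUE, DEP_OUTPUT,
   DEP_CONTROL, DEP_ANTI, so, for instance, ds_to_dk (DEP_TRUE | DEP_OUTPUT)
   yields REG_DEP_TRUE: the true dependence is the major type of such a
   status.  */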
/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;
  DEP_NONREG (dep) = 0;
  DEP_MULTIPLE (dep) = 0;
  DEP_REPLACE (dep) = NULL;
}

/* Init DEP with the arguments.
   While most of the scheduler (including targets) only need the major type
   of the dependency, it is convenient to hide full dep_status from them.  */
void
init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = 0;

  init_dep_1 (dep, pro, con, kind, ds);
}

/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}

static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
                      | DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
        {
        case REG_DEP_TRUE:
          t = 't';
          break;

        case REG_DEP_OUTPUT:
          t = 'o';
          break;

        case REG_DEP_CONTROL:
          t = 'c';
          break;

        case REG_DEP_ANTI:
          t = 'a';
          break;

        default:
          gcc_unreachable ();
          break;
        }

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
        dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}
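/* For example, a true dependence of insn 10 on insn 5, dumped with
   DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE, prints as "<5; 10; t; >".  */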
/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}

/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */
static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
          && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}

/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */
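/* Dep links are intrusive doubly-linked list nodes.  Instead of a `prev'
   pointer, each link records in DEP_LINK_PREV_NEXTP the address of the
   `next' field that points to it, so attaching and detaching a link needs
   no special case for the head of a list.  */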
/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
              && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}

/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}

/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}

/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}
/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}
/* Pool to hold all dependency nodes (dep_node_t).  */
static object_allocator<_dep_node> *dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = dn_pool->allocate ();
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}

/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
              && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));

  --dn_pool_diff;

  dn_pool->remove (n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static object_allocator<_deps_list> *dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = dl_pool->allocate ();

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  dl_pool->remove (l);
}

/* Return true if there are no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}

/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
        break;

      remove_from_deps_list (link, l);
    }
  while (1);
}

/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
static bool
dep_spec_p (dep_t dep)
{
  if (current_sched_info->flags & DO_SPECULATION)
    {
      if (DEP_STATUS (dep) & SPECULATIVE)
        return true;
    }
  if (current_sched_info->flags & DO_PREDICATION)
    {
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
        return true;
    }
  if (DEP_REPLACE (dep) != NULL)
    return true;
  return false;
}

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or may be implicitly
   clobbered or used) by the currently analyzed insn.  For example, an
   insn may have a constraint that admits only one register class; even
   if no hard register appears in the insn yet, the particular hard
   register will be in the insn after the reload pass because the
   constraint requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there is typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has an associated bitmap for each kind of dependency.  Each
   bitmap has enough entries to represent a dependency on any other insn
   in the insn chain.  Whenever the bitmap for the true-dependency cache
   is allocated, the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *control_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
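/* Number of insn luids the dependency caches above are allocated for.  */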
static int cache_size;

/* True if we should mark added dependencies as non-register deps.  */
static bool mark_as_hard;
static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
                                 enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
                                          rtx_insn_list **, int, enum reg_note,
                                          bool);
static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);

static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);

static bool sched_has_condition_p (const rtx_insn *);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
                                                          rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

static void check_dep (dep_t, bool);
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
        addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
        return 0;

      if (rev)
        *rev = true;
      return cond;
    }

  return 0;
}
/* Return the condition under which INSN does not execute (i.e. the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const rtx_insn *insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  if (!rev)
    {
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
                             XEXP (cond, 0),
                             XEXP (cond, 1));
    }
  return cond;
}
/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
        *rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}

/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const rtx_insn *insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
          (rev1 == rev2
           ? reversed_comparison_code (cond2, NULL)
           : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
          && conditions_mutex_p (cond1, cond2, rev1, rev2)
          /* Make sure first instruction doesn't affect condition of second
             instruction if switched.  */
          && !modified_in_p (cond1, insn2)
          /* Make sure second instruction doesn't affect condition of first
             instruction if switched.  */
          && !modified_in_p (cond2, insn1))
        return true;
    }
  return false;
}
/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot themselves be speculatively scheduled.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
        /* If instruction might fault, it cannot be speculatively scheduled.
           For control speculation it's obvious why and for data speculation
           it's because the insn might get wrong input if speculation
           wasn't successful.  */
        return false;

      if ((ds & BE_IN_DATA)
          && sched_has_condition_p (insn))
        /* If this is a predicated instruction, then it cannot be
           speculatively scheduled.  See PR35659.  */
        return false;
    }

  return true;
}
/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
              deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}
/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
        size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}
/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
        return false;
    }

  return true;
}
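/* A typical walk over these lists elsewhere in this file uses the
   sd_iterator, e.g.:

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       ...

   which visits every backward dependence of INSN in turn.  */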
/* Initialize data for INSN.  */
void
sd_init_insn (rtx_insn *insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx_insn *insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}
/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
                              sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
        if (DEP_PRO (dep) == pro)
          {
            found_p = true;
            break;
          }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
        if (DEP_CON (dep) == con)
          {
            found_p = true;
            break;
          }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
        *sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}
/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches, if available, to check whether any
   dependency is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none is found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
          && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
          && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
          && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
        return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks if producer and consumer are the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
        /* INSN has an internal dependence, which we can't overcome.  */
        HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}
/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
              && output_dependency_cache != NULL
              && anti_dependency_cache != NULL
              && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_CONTROL;
      else
        /* There is no existing dep so it should be created.  */
        return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
        /* DEP does not add anything to the existing dependence.  */
        return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
        /* There is no existing dep so it should be created.  */
        return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
          || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
        {
          if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
              == present_dep_types)
            /* DEP does not add anything to the existing dependence.  */
            return DEP_PRESENT;
        }
      else
        {
          /* Only true dependencies can be data speculative and
             only anti dependencies can be control speculative.  */
          gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
                      == present_dep_types);

          /* if (DEP is SPECULATIVE) then
               ..we should update DEP_STATUS
             else
               ..we should reset existing dep to non-speculative.  */
        }
    }

  return DEP_CHANGED;
}
/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
        {
        case REG_DEP_TRUE:
          bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_OUTPUT:
          bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_ANTI:
          bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_CONTROL:
          bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
          break;

        default:
          gcc_unreachable ();
        }
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
        bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
        bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
        bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
        bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
        {
          gcc_assert (current_sched_info->flags & DO_SPECULATION);
          bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
        }
    }
}
/* The type of dependence DEP has changed from OLD_TYPE.  Update the
   dependency caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear corresponding cache entry because type of the link
     may have changed.  Keep them if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
        {
        case REG_DEP_OUTPUT:
          bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_ANTI:
          bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_CONTROL:
          bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
          break;

        default:
          gcc_unreachable ();
        }
    }

  set_dependency_caches (dep);
}
/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
                      INSN_LUID (elem));
}
/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
            sd_iterator_def sd_it ATTRIBUTE_UNUSED,
            rtx mem1 ATTRIBUTE_UNUSED,
            rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
        {
          /* Either existing dep or a dep we're adding or both are
             speculative.  */
          if (!(ds & SPECULATIVE)
              || !(dep_status & SPECULATIVE))
            /* The new dep can't be speculative.  */
            new_status &= ~SPECULATIVE;
          else
            {
              /* Both are speculative.  Merge probabilities.  */
              if (mem1 != NULL)
                {
                  dw_t dw;

                  dw = estimate_dep_weak (mem1, mem2);
                  ds = set_dep_weak (ds, BEGIN_DATA, dw);
                }

              new_status = ds_merge (dep_status, ds);
            }
        }

      ds = new_status;

      if (dep_status != ds)
        {
          DEP_STATUS (dep) = ds;
          res = DEP_CHANGED;
        }
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
                     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
              && DEP_PRO (new_dep) != DEP_CON (new_dep));

  if (flag_checking)
    check_dep (new_dep, mem1 != NULL);

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
        {
        case DEP_PRESENT:
          dep_t present_dep;
          sd_iterator_def sd_it;

          present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
                                                      DEP_CON (new_dep),
                                                      resolved_p, &sd_it);
          DEP_MULTIPLE (present_dep) = 1;
          return DEP_PRESENT;

        case DEP_CHANGED:
          maybe_present_p = true;
          present_p = true;
          break;

        case DEP_CREATED:
          maybe_present_p = false;
          present_p = false;
          break;

        default:
          gcc_unreachable ();
          break;
        }
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
                                                  DEP_CON (new_dep),
                                                  resolved_p, &sd_it);

      if (present_dep != NULL)
        /* We found an existing dependency between ELEM and INSN.  */
        return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
        /* We didn't find a dep, so it shouldn't be present in the cache.  */
        gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
                                           estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}
/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
                         deps_list_t *back_list_ptr,
                         deps_list_t *forw_list_ptr)
{
  rtx_insn *con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
        *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
        *back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}
/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

  if (flag_checking)
    check_dep (dep, false);

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}
/* Add or update backward dependence between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}
/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
                   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
                   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
                 INSN_RESOLVED_FORW_DEPS (pro));
}
/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
                   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
                   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
                 INSN_FORW_DEPS (pro));
}
/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}
/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
        bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
                           &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}
/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
        {
          dump_dep (dump, dep, dump_dep_flags | all);
          fprintf (dump, " ");
        }
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}
/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx_insn *real_pro = pro;
      rtx_insn *other = real_insn_for_shadow (real_pro);
      rtx cond;

      if (other != NULL_RTX)
        real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
         the condition register than the one that was present at
         the jump.  */
      if (cond == NULL_RTX)
        dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
        {
          HARD_REG_SET uses;
          CLEAR_HARD_REG_SET (uses);
          note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
          if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
            dep_type = REG_DEP_ANTI;
        }
      if (dep_type == REG_DEP_CONTROL)
        {
          if (sched_verbose >= 5)
            fprintf (sched_dump, "making DEP_CONTROL for %d\n",
                     INSN_UID (real_pro));
          add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
                               REG_DEP_TRUE, false);
        }
    }

  add_dependence_1 (con, pro, dep_type);
}
/* A convenience wrapper to operate on an entire list.  HARD should be
   true if DEP_NONREG should be set on newly created dependencies.  */

static void
add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
                     enum reg_note dep_type, bool hard)
{
  mark_as_hard = hard;
  for (; list; list = list->next ())
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
        add_dependence (insn, list->insn (), dep_type);
    }
  mark_as_hard = false;
}
/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  HARD should be true if DEP_NONREG should be set on
   newly created dependencies.  */

static void
add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
                              rtx_insn_list **listp,
                              int uncond, enum reg_note dep_type, bool hard)
{
  add_dependence_list (insn, *listp, uncond, dep_type, hard);

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    return;

  free_INSN_LIST_list (listp);
}
/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
{
  int removed = 0;

  while (*listp)
    {
      if ((*listp)->insn () == insn)
        {
          remove_free_INSN_LIST_node (listp);
          removed++;
          continue;
        }

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
    }

  return removed;
}
/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx_insn *insn,
                                   rtx_insn_list **listp,
                                   rtx_expr_list **exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
        {
          remove_free_INSN_LIST_node (listp);
          remove_free_EXPR_LIST_node (exprp);
          removed++;
          continue;
        }

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
      exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
    }

  return removed;
}
/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The loop below could be optimized to clear the caches and back_deps
     in one call, but that would provoke duplication of code from
     delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
chain_to_prev_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx_insn *prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *i = insn;
      rtx_insn *pro = DEP_PRO (dep);

      do
        {
          i = prev_nonnote_insn (i);
          if (pro == i)
            goto next_link;
        } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
        add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
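/* For instance, given

     insn 1:  r1 = *p
     insn 2:  *q = r2

   insn 2 anti-depends on insn 1 (a write following a read) whenever *p
   and *q may refer to the same location.  */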
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
                         rtx_insn *insn, rtx mem)
{
  rtx_insn_list **insn_list;
  rtx_insn_list *insn_node;
  rtx_expr_list **mem_list;
  rtx_expr_list *mem_node;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
        deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  insn_node = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = insn_node;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
                                                        GET_MODE (mem), insn);
    }
  mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = mem_node;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
                     int for_write)
{
  if (for_write)
    {
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
                                    1, REG_DEP_ANTI, true);
      if (!deps->readonly)
        {
          free_EXPR_LIST_list (&deps->pending_read_mems);
          deps->pending_read_list_length = 0;
        }
    }

  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
                                true);

  add_dependence_list_and_free (deps, insn,
                                &deps->last_pending_memory_flush, 1,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
                                true);

  add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
                                REG_DEP_ANTI, true);

  if (DEBUG_INSN_P (insn))
    {
      if (for_write)
        free_INSN_LIST_list (&deps->pending_read_insns);
      free_INSN_LIST_list (&deps->pending_write_insns);
      free_INSN_LIST_list (&deps->last_pending_memory_flush);
      free_INSN_LIST_list (&deps->pending_jump_insns);
    }

  if (!deps->readonly)
    {
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
    }
  mark_as_hard = false;
}
/* The instruction whose dependencies we are currently analyzing.  */
static rtx_insn *cur_insn = NULL;

/* Implement hooks for haifa scheduler.  */
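/* Begin analyzing dependencies of INSN; the note_* hooks below record
   their results against it through cur_insn.  */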
static void
haifa_start_insn (rtx_insn *insn)
{
  gcc_assert (insn && !cur_insn);

  cur_insn = insn;
}

static void
haifa_finish_insn (void)
{
  cur_insn = NULL;
}

void
haifa_note_reg_set (int regno)
{
  SET_REGNO_REG_SET (reg_pending_sets, regno);
}

void
haifa_note_reg_clobber (int regno)
{
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
}

void
haifa_note_reg_use (int regno)
{
  SET_REGNO_REG_SET (reg_pending_uses, regno);
}

static void
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
{
  if (!(ds & SPECULATIVE))
    {
      mem = NULL_RTX;
      pending_mem = NULL_RTX;
    }
  else
    gcc_assert (ds & BEGIN_DATA);

  {
    dep_def _dep, *dep = &_dep;

    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
                current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
    DEP_NONREG (dep) = 1;
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
  }
}

static void
haifa_note_dep (rtx_insn *elem, ds_t ds)
{
  dep_def _dep;
  dep_t dep = &_dep;

  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
  if (mark_as_hard)
    DEP_NONREG (dep) = 1;
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}
static void
note_reg_use (int r)
{
  if (sched_deps_info->note_reg_use)
    sched_deps_info->note_reg_use (r);
}

static void
note_reg_set (int r)
{
  if (sched_deps_info->note_reg_set)
    sched_deps_info->note_reg_set (r);
}

static void
note_reg_clobber (int r)
{
  if (sched_deps_info->note_reg_clobber)
    sched_deps_info->note_reg_clobber (r);
}

static void
note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
{
  if (sched_deps_info->note_mem_dep)
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
}

static void
note_dep (rtx_insn *e, ds_t ds)
{
  if (sched_deps_info->note_dep)
    sched_deps_info->note_dep (e, ds);
}
/* Return the reg_note corresponding to DS.  */
enum reg_note
ds_to_dt (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;
  else if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;
  else if (ds & DEP_ANTI)
    return REG_DEP_ANTI;
  else
    {
      gcc_assert (ds & DEP_CONTROL);
      return REG_DEP_CONTROL;
    }
}
/* Functions for computation of info needed for register pressure
   sensitive insn scheduling.  */

/* Allocate and return reg_use_data structure for REGNO and INSN.  */
static struct reg_use_data *
create_insn_reg_use (int regno, rtx_insn *insn)
{
  struct reg_use_data *use;

  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
  use->regno = regno;
  use->insn = insn;
  use->next_insn_use = INSN_REG_USE_LIST (insn);
  INSN_REG_USE_LIST (insn) = use;
  return use;
}
/* Allocate reg_set_data structure for REGNO and INSN.  */
static void
create_insn_reg_set (int regno, rtx insn)
{
  struct reg_set_data *set;

  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
  set->regno = regno;
  set->insn = insn;
  set->next_insn_set = INSN_REG_SET_LIST (insn);
  INSN_REG_SET_LIST (insn) = set;
}
/* Set up insn register uses for INSN and dependency context DEPS.  */
static void
setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
{
  unsigned i;
  reg_set_iterator rsi;
  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
    {
      if (i < FIRST_PSEUDO_REGISTER
          && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
        continue;

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
          && ! REGNO_REG_SET_P (reg_pending_sets, i)
          && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
        /* Ignore use which is not dying.  */
        continue;

      use = create_insn_reg_use (i, insn);
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  */
      for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
        {
          use2 = create_insn_reg_use (i, list->insn ());
          next = use->next_regno_use;
          use->next_regno_use = use2;
          use2->next_regno_use = next;
        }
    }
}
/* Register pressure info for the currently processed insn.  */
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];

/* Return TRUE if INSN has the use structure for REGNO.  */
static bool
insn_use_p (rtx insn, int regno)
{
  struct reg_use_data *use;

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (use->regno == regno)
      return true;
  return false;
}
/* Update the register pressure info after birth of pseudo register REGNO
   in INSN.  Arguments CLOBBER_P and UNUSED_P say correspondingly that
   the register is in clobber or unused after the insn.  */
static void
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
{
  int incr, new_incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      if (clobber_p)
        {
          new_incr = reg_pressure_info[cl].clobber_increase + incr;
          reg_pressure_info[cl].clobber_increase = new_incr;
        }
      else if (unused_p)
        {
          new_incr = reg_pressure_info[cl].unused_set_increase + incr;
          reg_pressure_info[cl].unused_set_increase = new_incr;
        }
      else
        {
          new_incr = reg_pressure_info[cl].set_increase + incr;
          reg_pressure_info[cl].set_increase = new_incr;
          if (! insn_use_p (insn, regno))
            reg_pressure_info[cl].change += incr;
          create_insn_reg_set (regno, insn);
        }
      gcc_assert (new_incr < (1 << INCREASE_BITS));
    }
}
/* Like mark_insn_pseudo_birth except that NREGS says how many hard
   registers are involved in the birth.  */
static void
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
                            bool clobber_p, bool unused_p)
{
  enum reg_class cl;
  int new_incr, last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
        {
          cl = sched_regno_pressure_class[regno];
          if (cl != NO_REGS)
            {
              if (clobber_p)
                {
                  new_incr = reg_pressure_info[cl].clobber_increase + 1;
                  reg_pressure_info[cl].clobber_increase = new_incr;
                }
              else if (unused_p)
                {
                  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
                  reg_pressure_info[cl].unused_set_increase = new_incr;
                }
              else
                {
                  new_incr = reg_pressure_info[cl].set_increase + 1;
                  reg_pressure_info[cl].set_increase = new_incr;
                  if (! insn_use_p (insn, regno))
                    reg_pressure_info[cl].change += 1;
                  create_insn_reg_set (regno, insn);
                }
              gcc_assert (new_incr < (1 << INCREASE_BITS));
            }
        }
      regno++;
    }
}
/* Update the register pressure info after birth of pseudo or hard
   register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say
   correspondingly that the register is in clobber or unused after the
   insn.  */
static void
mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
                                clobber_p, unused_p);
  else
    mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
}
2105 /* Update the register pressure info after death of pseudo register
2106 REGNO. */
2107 static void
2108 mark_pseudo_death (int regno)
2110 int incr;
2111 enum reg_class cl;
2113 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2114 cl = sched_regno_pressure_class[regno];
2115 if (cl != NO_REGS)
2117 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2118 reg_pressure_info[cl].change -= incr;
2122 /* Like mark_pseudo_death except that NREGS says how many hard
2123 registers are involved in the death. */
2124 static void
2125 mark_hard_regno_death (int regno, int nregs)
2127 enum reg_class cl;
2128 int last = regno + nregs;
2130 while (regno < last)
2132 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2133 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2135 cl = sched_regno_pressure_class[regno];
2136 if (cl != NO_REGS)
2137 reg_pressure_info[cl].change -= 1;
2139 regno++;
2143 /* Update the register pressure info after death of pseudo or hard
2144 register REG. */
2145 static void
2146 mark_reg_death (rtx reg)
2148 int regno;
2150 if (GET_CODE (reg) == SUBREG)
2151 reg = SUBREG_REG (reg);
2153 if (! REG_P (reg))
2154 return;
2156 regno = REGNO (reg);
2157 if (regno < FIRST_PSEUDO_REGISTER)
2158 mark_hard_regno_death (regno, REG_NREGS (reg));
2159 else
2160 mark_pseudo_death (regno);
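/* Editor's note: a worked example, not part of GCC.  For a hypothetical
   insn (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102))) where
   regs 101 and 102 die, with all three pseudos in class CL and incr 1:
   the birth of reg 100 adds +1 to reg_pressure_info[CL].change (it is
   set but not used by the insn), and the two deaths subtract 2, so the
   net pressure change for CL is -1 after the insn.  */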
2163 /* Process SETTER of REG. DATA is an insn containing the setter. */
2164 static void
2165 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2167 if (setter != NULL_RTX && GET_CODE (setter) != SET)
2168 return;
2169 mark_insn_reg_birth
2170 ((rtx) data, reg, false,
2171 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2174 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
2175 static void
2176 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2178 if (GET_CODE (setter) == CLOBBER)
2179 mark_insn_reg_birth ((rtx) data, reg, true, false);
2182 /* Set up reg pressure info related to INSN. */
2183 void
2184 init_insn_reg_pressure_info (rtx_insn *insn)
2186 int i, len;
2187 enum reg_class cl;
2188 static struct reg_pressure_data *pressure_info;
2189 rtx link;
2191 gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2193 if (! INSN_P (insn))
2194 return;
2196 for (i = 0; i < ira_pressure_classes_num; i++)
2198 cl = ira_pressure_classes[i];
2199 reg_pressure_info[cl].clobber_increase = 0;
2200 reg_pressure_info[cl].set_increase = 0;
2201 reg_pressure_info[cl].unused_set_increase = 0;
2202 reg_pressure_info[cl].change = 0;
2205 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2207 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2209 if (AUTO_INC_DEC)
2210 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2211 if (REG_NOTE_KIND (link) == REG_INC)
2212 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2214 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2215 if (REG_NOTE_KIND (link) == REG_DEAD)
2216 mark_reg_death (XEXP (link, 0));
2218 len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2219 pressure_info
2220 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2221 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2222 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2223 * sizeof (int), 1);
2224 for (i = 0; i < ira_pressure_classes_num; i++)
2226 cl = ira_pressure_classes[i];
2227 pressure_info[i].clobber_increase
2228 = reg_pressure_info[cl].clobber_increase;
2229 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2230 pressure_info[i].unused_set_increase
2231 = reg_pressure_info[cl].unused_set_increase;
2232 pressure_info[i].change = reg_pressure_info[cl].change;
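/* Editor's note: a minimal sketch, not part of GCC, of how a consumer
   might read the per-insn data built above; "pressure_delta" is a
   hypothetical helper, and the array is indexed in
   ira_pressure_classes[] order exactly as it is filled in here.  */
#if 0
static inline int
pressure_delta (rtx_insn *insn, int class_index)
{
  return INSN_REG_PRESSURE (insn)[class_index].change;
}
#endif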
2239 /* Internal variable for sched_analyze_[12] () functions.
2240 If it is true, sched_analyze_[12] is looking at the
2241 outermost SET. */
2242 static bool can_start_lhs_rhs_p;
2244 /* Extend reg info for the deps context DEPS given that
2245 we have just generated a register numbered REGNO. */
2246 static void
2247 extend_deps_reg_info (struct deps_desc *deps, int regno)
2249 int max_regno = regno + 1;
2251 gcc_assert (!reload_completed);
2253 /* In a readonly context, it would not hurt to extend info,
2254 but it should not be needed. */
2255 if (reload_completed && deps->readonly)
2257 deps->max_reg = max_regno;
2258 return;
2261 if (max_regno > deps->max_reg)
2263 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2264 max_regno);
2265 memset (&deps->reg_last[deps->max_reg],
2266 0, (max_regno - deps->max_reg)
2267 * sizeof (struct deps_reg));
2268 deps->max_reg = max_regno;
2272 /* Extends REG_INFO_P if needed. */
2273 void
2274 maybe_extend_reg_info_p (void)
2276 /* Extend REG_INFO_P, if needed. */
2277 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2279 size_t new_reg_info_p_size = max_regno + 128;
2281 gcc_assert (!reload_completed && sel_sched_p ());
2283 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2284 new_reg_info_p_size,
2285 reg_info_p_size,
2286 sizeof (*reg_info_p));
2287 reg_info_p_size = new_reg_info_p_size;
2291 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2292 The type of the reference is specified by REF and can be SET,
2293 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2295 static void
2296 sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
2297 enum rtx_code ref, rtx_insn *insn)
2299 /* We could emit new pseudos in renaming. Extend the reg structures. */
2300 if (!reload_completed && sel_sched_p ()
2301 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2302 extend_deps_reg_info (deps, regno);
2304 maybe_extend_reg_info_p ();
2306 /* A hard reg in a wide mode may really be multiple registers.
2307 If so, mark all of them just like the first. */
2308 if (regno < FIRST_PSEUDO_REGISTER)
2310 int i = hard_regno_nregs[regno][mode];
2311 if (ref == SET)
2313 while (--i >= 0)
2314 note_reg_set (regno + i);
2316 else if (ref == USE)
2318 while (--i >= 0)
2319 note_reg_use (regno + i);
2321 else
2323 while (--i >= 0)
2324 note_reg_clobber (regno + i);
2328 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2329 it does not reload. Ignore these as they have served their
2330 purpose already. */
2331 else if (regno >= deps->max_reg)
2333 enum rtx_code code = GET_CODE (PATTERN (insn));
2334 gcc_assert (code == USE || code == CLOBBER);
2337 else
2339 if (ref == SET)
2340 note_reg_set (regno);
2341 else if (ref == USE)
2342 note_reg_use (regno);
2343 else
2344 note_reg_clobber (regno);
2346 /* Pseudos that are REG_EQUIV to something may be replaced
2347 by that during reloading. We need only add dependencies for
2348 the address in the REG_EQUIV note. */
2349 if (!reload_completed && get_reg_known_equiv_p (regno))
2351 rtx t = get_reg_known_value (regno);
2352 if (MEM_P (t))
2353 sched_analyze_2 (deps, XEXP (t, 0), insn);
2356 /* Don't let it cross a call after scheduling if it doesn't
2357 already cross one. */
2358 if (REG_N_CALLS_CROSSED (regno) == 0)
2360 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2361 deps->sched_before_next_call
2362 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2363 else
2364 add_dependence_list (insn, deps->last_function_call, 1,
2365 REG_DEP_ANTI, false);
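/* Editor's note: a worked example, not part of GCC.  On a hypothetical
   32-bit target where hard_regno_nregs[R][DImode] == 2, a SET of
   (reg:DI R) reaching sched_analyze_reg records both halves:

     note_reg_set (R);
     note_reg_set (R + 1);

   so a later insn touching either hard register sees the dependence.  */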
2370 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2371 rtx, X, creating all dependencies generated by the write to the
2372 destination of X, and reads of everything mentioned. */
2374 static void
2375 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2377 rtx dest = XEXP (x, 0);
2378 enum rtx_code code = GET_CODE (x);
2379 bool cslr_p = can_start_lhs_rhs_p;
2381 can_start_lhs_rhs_p = false;
2383 gcc_assert (dest);
2384 if (dest == 0)
2385 return;
2387 if (cslr_p && sched_deps_info->start_lhs)
2388 sched_deps_info->start_lhs (dest);
2390 if (GET_CODE (dest) == PARALLEL)
2392 int i;
2394 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2395 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2396 sched_analyze_1 (deps,
2397 gen_rtx_CLOBBER (VOIDmode,
2398 XEXP (XVECEXP (dest, 0, i), 0)),
2399 insn);
2401 if (cslr_p && sched_deps_info->finish_lhs)
2402 sched_deps_info->finish_lhs ();
2404 if (code == SET)
2406 can_start_lhs_rhs_p = cslr_p;
2408 sched_analyze_2 (deps, SET_SRC (x), insn);
2410 can_start_lhs_rhs_p = false;
2413 return;
2416 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2417 || GET_CODE (dest) == ZERO_EXTRACT)
2419 if (GET_CODE (dest) == STRICT_LOW_PART
2420 || GET_CODE (dest) == ZERO_EXTRACT
2421 || df_read_modify_subreg_p (dest))
2423 /* These both read and modify the result. We must handle
2424 them as writes to get proper dependencies for following
2425 instructions. We must handle them as reads to get proper
2426 dependencies from this to previous instructions.
2427 Thus we need to call sched_analyze_2. */
2429 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2431 if (GET_CODE (dest) == ZERO_EXTRACT)
2433 /* The second and third arguments are values read by this insn. */
2434 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2435 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2437 dest = XEXP (dest, 0);
2440 if (REG_P (dest))
2442 int regno = REGNO (dest);
2443 machine_mode mode = GET_MODE (dest);
2445 sched_analyze_reg (deps, regno, mode, code, insn);
2447 #ifdef STACK_REGS
2448 /* Treat all writes to a stack register as modifying the TOS. */
2449 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2451 /* Avoid analyzing the same register twice. */
2452 if (regno != FIRST_STACK_REG)
2453 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2455 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2456 FIRST_STACK_REG);
2458 #endif
2460 else if (MEM_P (dest))
2462 /* Writing memory. */
2463 rtx t = dest;
2465 if (sched_deps_info->use_cselib)
2467 machine_mode address_mode = get_address_mode (dest);
2469 t = shallow_copy_rtx (dest);
2470 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2471 GET_MODE (t), insn);
2472 XEXP (t, 0)
2473 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2474 insn);
2476 t = canon_rtx (t);
2478 /* Pending lists can't get larger with a readonly context. */
2479 if (!deps->readonly
2480 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2481 >= MAX_PENDING_LIST_LENGTH))
2483 /* Flush all pending reads and writes to prevent the pending lists
2484 from getting any larger. Insn scheduling runs too slowly when
2485 these lists get long. When compiling GCC with itself,
2486 this flush occurs 8 times for sparc, and 10 times for m88k using
2487 the default value of 32. */
2488 flush_pending_lists (deps, insn, false, true);
2490 else
2492 rtx_insn_list *pending;
2493 rtx_expr_list *pending_mem;
2495 pending = deps->pending_read_insns;
2496 pending_mem = deps->pending_read_mems;
2497 while (pending)
2499 if (anti_dependence (pending_mem->element (), t)
2500 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2501 note_mem_dep (t, pending_mem->element (), pending->insn (),
2502 DEP_ANTI);
2504 pending = pending->next ();
2505 pending_mem = pending_mem->next ();
2508 pending = deps->pending_write_insns;
2509 pending_mem = deps->pending_write_mems;
2510 while (pending)
2512 if (output_dependence (pending_mem->element (), t)
2513 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2514 note_mem_dep (t, pending_mem->element (),
2515 pending->insn (),
2516 DEP_OUTPUT);
2518 pending = pending->next ();
2519 pending_mem = pending_mem->next ();
2522 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2523 REG_DEP_ANTI, true);
2524 add_dependence_list (insn, deps->pending_jump_insns, 1,
2525 REG_DEP_CONTROL, true);
2527 if (!deps->readonly)
2528 add_insn_mem_dependence (deps, false, insn, dest);
2530 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2533 if (cslr_p && sched_deps_info->finish_lhs)
2534 sched_deps_info->finish_lhs ();
2536 /* Analyze reads. */
2537 if (GET_CODE (x) == SET)
2539 can_start_lhs_rhs_p = cslr_p;
2541 sched_analyze_2 (deps, SET_SRC (x), insn);
2543 can_start_lhs_rhs_p = false;
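/* Editor's note: a worked example, not part of GCC, of the pending-list
   walk in sched_analyze_1 above.  For a store to [r1] that follows a
   load from [r1] and an earlier store to [r1]:

     pending read of [r1]   -> anti_dependence holds   -> DEP_ANTI
     pending write of [r1]  -> output_dependence holds -> DEP_OUTPUT

   and the new store itself joins deps->pending_write_mems through
   add_insn_mem_dependence unless the context is readonly.  */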
2547 /* Analyze the uses of memory and registers in rtx X in INSN. */
2548 static void
2549 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2551 int i;
2552 int j;
2553 enum rtx_code code;
2554 const char *fmt;
2555 bool cslr_p = can_start_lhs_rhs_p;
2557 can_start_lhs_rhs_p = false;
2559 gcc_assert (x);
2560 if (x == 0)
2561 return;
2563 if (cslr_p && sched_deps_info->start_rhs)
2564 sched_deps_info->start_rhs (x);
2566 code = GET_CODE (x);
2568 switch (code)
2570 CASE_CONST_ANY:
2571 case SYMBOL_REF:
2572 case CONST:
2573 case LABEL_REF:
2574 /* Ignore constants. */
2575 if (cslr_p && sched_deps_info->finish_rhs)
2576 sched_deps_info->finish_rhs ();
2578 return;
2580 case CC0:
2581 if (!HAVE_cc0)
2582 gcc_unreachable ();
2584 /* User of CC0 depends on immediately preceding insn. */
2585 SCHED_GROUP_P (insn) = 1;
2586 /* Don't move CC0 setter to another block (it can set up the
2587 same flag for previous CC0 users which is safe). */
2588 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2590 if (cslr_p && sched_deps_info->finish_rhs)
2591 sched_deps_info->finish_rhs ();
2593 return;
2595 case REG:
2597 int regno = REGNO (x);
2598 machine_mode mode = GET_MODE (x);
2600 sched_analyze_reg (deps, regno, mode, USE, insn);
2602 #ifdef STACK_REGS
2603 /* Treat all reads of a stack register as modifying the TOS. */
2604 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2606 /* Avoid analyzing the same register twice. */
2607 if (regno != FIRST_STACK_REG)
2608 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2609 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2611 #endif
2613 if (cslr_p && sched_deps_info->finish_rhs)
2614 sched_deps_info->finish_rhs ();
2616 return;
2619 case MEM:
2621 /* Reading memory. */
2622 rtx_insn_list *u;
2623 rtx_insn_list *pending;
2624 rtx_expr_list *pending_mem;
2625 rtx t = x;
2627 if (sched_deps_info->use_cselib)
2629 machine_mode address_mode = get_address_mode (t);
2631 t = shallow_copy_rtx (t);
2632 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2633 GET_MODE (t), insn);
2634 XEXP (t, 0)
2635 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2636 insn);
2639 if (!DEBUG_INSN_P (insn))
2641 t = canon_rtx (t);
2642 pending = deps->pending_read_insns;
2643 pending_mem = deps->pending_read_mems;
2644 while (pending)
2646 if (read_dependence (pending_mem->element (), t)
2647 && ! sched_insns_conditions_mutex_p (insn,
2648 pending->insn ()))
2649 note_mem_dep (t, pending_mem->element (),
2650 pending->insn (),
2651 DEP_ANTI);
2653 pending = pending->next ();
2654 pending_mem = pending_mem->next ();
2657 pending = deps->pending_write_insns;
2658 pending_mem = deps->pending_write_mems;
2659 while (pending)
2661 if (true_dependence (pending_mem->element (), VOIDmode, t)
2662 && ! sched_insns_conditions_mutex_p (insn,
2663 pending->insn ()))
2664 note_mem_dep (t, pending_mem->element (),
2665 pending->insn (),
2666 sched_deps_info->generate_spec_deps
2667 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2669 pending = pending->next ();
2670 pending_mem = pending_mem->next ();
2673 for (u = deps->last_pending_memory_flush; u; u = u->next ())
2674 add_dependence (insn, u->insn (), REG_DEP_ANTI);
2676 for (u = deps->pending_jump_insns; u; u = u->next ())
2677 if (deps_may_trap_p (x))
2679 if ((sched_deps_info->generate_spec_deps)
2680 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2682 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2683 MAX_DEP_WEAK);
2685 note_dep (u->insn (), ds);
2687 else
2688 add_dependence (insn, u->insn (), REG_DEP_CONTROL);
2692 /* Always add these dependencies to pending_reads, since
2693 this insn may be followed by a write. */
2694 if (!deps->readonly)
2696 if ((deps->pending_read_list_length
2697 + deps->pending_write_list_length)
2698 >= MAX_PENDING_LIST_LENGTH
2699 && !DEBUG_INSN_P (insn))
2700 flush_pending_lists (deps, insn, true, true);
2701 add_insn_mem_dependence (deps, true, insn, x);
2704 sched_analyze_2 (deps, XEXP (x, 0), insn);
2706 if (cslr_p && sched_deps_info->finish_rhs)
2707 sched_deps_info->finish_rhs ();
2709 return;
2712 /* Force pending stores to memory in case a trap handler needs them. */
2713 case TRAP_IF:
2714 flush_pending_lists (deps, insn, true, false);
2715 break;
2717 case PREFETCH:
2718 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2719 reg_pending_barrier = TRUE_BARRIER;
2720 /* A prefetch insn contains only an address, so if the prefetch
2721 address has no registers, there will be no dependencies on
2722 the prefetch insn. This is wrong from a correctness point
2723 of view: such a prefetch can be moved below a jump insn,
2724 which usually generates a MOVE_BARRIER that prevents insns
2725 containing registers or memories from moving through the
2726 barrier. It is also wrong from a performance point of
2727 view: a prefetch without dependencies tends to be issued
2728 later instead of earlier. It is hard to generate accurate
2729 dependencies for prefetch insns, as a prefetch has only the
2730 start address, but it is better to have something than
2731 nothing. */
2732 if (!deps->readonly)
2734 rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2735 if (sched_deps_info->use_cselib)
2736 cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2737 add_insn_mem_dependence (deps, true, insn, x);
2739 break;
2741 case UNSPEC_VOLATILE:
2742 flush_pending_lists (deps, insn, true, true);
2743 /* FALLTHRU */
2745 case ASM_OPERANDS:
2746 case ASM_INPUT:
2748 /* Traditional and volatile asm instructions must be considered to use
2749 and clobber all hard registers, all pseudo-registers and all of
2750 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2752 Consider for instance a volatile asm that changes the fpu rounding
2753 mode. An insn should not be moved across this even if it only uses
2754 pseudo-regs because it might give an incorrectly rounded result. */
2755 if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2756 && !DEBUG_INSN_P (insn))
2757 reg_pending_barrier = TRUE_BARRIER;
2759 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2760 We cannot just fall through here, since then we would be confused
2761 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2762 a traditional asm, unlike its normal usage. */
2764 if (code == ASM_OPERANDS)
2766 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2767 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2769 if (cslr_p && sched_deps_info->finish_rhs)
2770 sched_deps_info->finish_rhs ();
2772 return;
2774 break;
2777 case PRE_DEC:
2778 case POST_DEC:
2779 case PRE_INC:
2780 case POST_INC:
2781 /* These both read and modify the result. We must handle them as writes
2782 to get proper dependencies for following instructions. We must handle
2783 them as reads to get proper dependencies from this to previous
2784 instructions. Thus we need to pass them to both sched_analyze_1
2785 and sched_analyze_2. We must call sched_analyze_2 first in order
2786 to get the proper antecedent for the read. */
2787 sched_analyze_2 (deps, XEXP (x, 0), insn);
2788 sched_analyze_1 (deps, x, insn);
2790 if (cslr_p && sched_deps_info->finish_rhs)
2791 sched_deps_info->finish_rhs ();
2793 return;
2795 case POST_MODIFY:
2796 case PRE_MODIFY:
2797 /* op0 = op0 + op1 */
2798 sched_analyze_2 (deps, XEXP (x, 0), insn);
2799 sched_analyze_2 (deps, XEXP (x, 1), insn);
2800 sched_analyze_1 (deps, x, insn);
2802 if (cslr_p && sched_deps_info->finish_rhs)
2803 sched_deps_info->finish_rhs ();
2805 return;
2807 default:
2808 break;
2811 /* Other cases: walk the insn. */
2812 fmt = GET_RTX_FORMAT (code);
2813 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2815 if (fmt[i] == 'e')
2816 sched_analyze_2 (deps, XEXP (x, i), insn);
2817 else if (fmt[i] == 'E')
2818 for (j = 0; j < XVECLEN (x, i); j++)
2819 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2822 if (cslr_p && sched_deps_info->finish_rhs)
2823 sched_deps_info->finish_rhs ();
2826 /* Try to group two fusible insns together to prevent the scheduler
2827 from scheduling them apart. */
2829 static void
2830 sched_macro_fuse_insns (rtx_insn *insn)
2832 rtx_insn *prev;
2834 if (any_condjump_p (insn))
2836 unsigned int condreg1, condreg2;
2837 rtx cc_reg_1;
2838 targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2839 cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2840 prev = prev_nonnote_nondebug_insn (insn);
2841 if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
2842 || !prev
2843 || !modified_in_p (cc_reg_1, prev))
2844 return;
2846 else
2848 rtx insn_set = single_set (insn);
2850 prev = prev_nonnote_nondebug_insn (insn);
2851 if (!prev
2852 || !insn_set
2853 || !single_set (prev))
2854 return;
2858 if (targetm.sched.macro_fusion_pair_p (prev, insn))
2859 SCHED_GROUP_P (insn) = 1;
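/* Editor's note: a minimal sketch, not part of GCC or any real target,
   of the shape of the macro_fusion_pair_p hook consulted above; the
   fusion condition is hypothetical.  */
#if 0
static bool
example_macro_fusion_pair_p (rtx_insn *condgen, rtx_insn *condjmp)
{
  rtx set = single_set (condgen);

  /* Fuse only a flag-setting compare that immediately precedes a
     conditional jump.  */
  return (set != NULL_RTX
	  && GET_CODE (SET_SRC (set)) == COMPARE
	  && any_condjump_p (condjmp));
}
#endif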
2863 /* Analyze an INSN with pattern X to find all dependencies. */
2864 static void
2865 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
2867 RTX_CODE code = GET_CODE (x);
2868 rtx link;
2869 unsigned i;
2870 reg_set_iterator rsi;
2872 if (! reload_completed)
2874 HARD_REG_SET temp;
2876 extract_insn (insn);
2877 preprocess_constraints (insn);
2878 alternative_mask preferred = get_preferred_alternatives (insn);
2879 ira_implicitly_set_insn_hard_regs (&temp, preferred);
2880 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2881 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2884 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2885 && code == SET);
2887 /* Group compare and branch insns for macro-fusion. */
2888 if (targetm.sched.macro_fusion_p
2889 && targetm.sched.macro_fusion_p ())
2890 sched_macro_fuse_insns (insn);
2892 if (may_trap_p (x))
2893 /* Avoid moving trapping instructions across function calls that might
2894 not always return. */
2895 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2896 1, REG_DEP_ANTI, true);
2898 /* We must avoid creating a situation in which two successors of the
2899 current block have different unwind info after scheduling. If at any
2900 point the two paths re-join this leads to incorrect unwind info. */
2901 /* ??? There are certain situations involving a forced frame pointer in
2902 which, with extra effort, we could fix up the unwind info at a later
2903 CFG join. However, it seems better to notice these cases earlier
2904 during prologue generation and avoid marking the frame pointer setup
2905 as frame-related at all. */
2906 if (RTX_FRAME_RELATED_P (insn))
2908 /* Make sure prologue insn is scheduled before next jump. */
2909 deps->sched_before_next_jump
2910 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2912 /* Make sure epilogue insn is scheduled after preceding jumps. */
2913 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2914 true);
2917 if (code == COND_EXEC)
2919 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2921 /* ??? Should be recording conditions so we reduce the number of
2922 false dependencies. */
2923 x = COND_EXEC_CODE (x);
2924 code = GET_CODE (x);
2926 if (code == SET || code == CLOBBER)
2928 sched_analyze_1 (deps, x, insn);
2930 /* Bare clobber insns are used for letting life analysis, reg-stack
2931 and others know that a value is dead. Depend on the last call
2932 instruction so that reg-stack won't get confused. */
2933 if (code == CLOBBER)
2934 add_dependence_list (insn, deps->last_function_call, 1,
2935 REG_DEP_OUTPUT, true);
2937 else if (code == PARALLEL)
2939 for (i = XVECLEN (x, 0); i--;)
2941 rtx sub = XVECEXP (x, 0, i);
2942 code = GET_CODE (sub);
2944 if (code == COND_EXEC)
2946 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2947 sub = COND_EXEC_CODE (sub);
2948 code = GET_CODE (sub);
2950 if (code == SET || code == CLOBBER)
2951 sched_analyze_1 (deps, sub, insn);
2952 else
2953 sched_analyze_2 (deps, sub, insn);
2956 else
2957 sched_analyze_2 (deps, x, insn);
2959 /* Mark registers CLOBBERED or used by called function. */
2960 if (CALL_P (insn))
2962 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2964 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2965 sched_analyze_1 (deps, XEXP (link, 0), insn);
2966 else if (GET_CODE (XEXP (link, 0)) != SET)
2967 sched_analyze_2 (deps, XEXP (link, 0), insn);
2969 /* Don't schedule anything after a tail call; the tail call needs
2970 to use at least all call-saved registers. */
2971 if (SIBLING_CALL_P (insn))
2972 reg_pending_barrier = TRUE_BARRIER;
2973 else if (find_reg_note (insn, REG_SETJMP, NULL))
2974 reg_pending_barrier = MOVE_BARRIER;
2977 if (JUMP_P (insn))
2979 rtx_insn *next = next_nonnote_nondebug_insn (insn);
2980 if (next && BARRIER_P (next))
2981 reg_pending_barrier = MOVE_BARRIER;
2982 else
2984 rtx_insn_list *pending;
2985 rtx_expr_list *pending_mem;
2987 if (sched_deps_info->compute_jump_reg_dependencies)
2989 (*sched_deps_info->compute_jump_reg_dependencies)
2990 (insn, reg_pending_control_uses);
2992 /* Make latency of jump equal to 0 by using anti-dependence. */
2993 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
2995 struct deps_reg *reg_last = &deps->reg_last[i];
2996 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
2997 false);
2998 add_dependence_list (insn, reg_last->implicit_sets,
2999 0, REG_DEP_ANTI, false);
3000 add_dependence_list (insn, reg_last->clobbers, 0,
3001 REG_DEP_ANTI, false);
3005 /* All memory writes and volatile reads must happen before the
3006 jump. Non-volatile reads must happen before the jump iff
3007 the result is needed by the register use mask computed above. */
3009 pending = deps->pending_write_insns;
3010 pending_mem = deps->pending_write_mems;
3011 while (pending)
3013 if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3014 add_dependence (insn, pending->insn (),
3015 REG_DEP_OUTPUT);
3016 pending = pending->next ();
3017 pending_mem = pending_mem->next ();
3020 pending = deps->pending_read_insns;
3021 pending_mem = deps->pending_read_mems;
3022 while (pending)
3024 if (MEM_VOLATILE_P (pending_mem->element ())
3025 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3026 add_dependence (insn, pending->insn (),
3027 REG_DEP_OUTPUT);
3028 pending = pending->next ();
3029 pending_mem = pending_mem->next ();
3032 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
3033 REG_DEP_ANTI, true);
3034 add_dependence_list (insn, deps->pending_jump_insns, 1,
3035 REG_DEP_ANTI, true);
3039 /* If this instruction can throw an exception, then moving it changes
3040 where block boundaries fall. This is mighty confusing elsewhere.
3041 Therefore, prevent such an instruction from being moved. Same for
3042 non-jump instructions that define block boundaries.
3043 ??? Unclear whether this is still necessary in EBB mode. If not,
3044 add_branch_dependences should be adjusted for RGN mode instead. */
3045 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3046 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3047 reg_pending_barrier = MOVE_BARRIER;
3049 if (sched_pressure != SCHED_PRESSURE_NONE)
3051 setup_insn_reg_uses (deps, insn);
3052 init_insn_reg_pressure_info (insn);
3055 /* Add register dependencies for insn. */
3056 if (DEBUG_INSN_P (insn))
3058 rtx_insn *prev = deps->last_debug_insn;
3059 rtx_insn_list *u;
3061 if (!deps->readonly)
3062 deps->last_debug_insn = insn;
3064 if (prev)
3065 add_dependence (insn, prev, REG_DEP_ANTI);
3067 add_dependence_list (insn, deps->last_function_call, 1,
3068 REG_DEP_ANTI, false);
3070 if (!sel_sched_p ())
3071 for (u = deps->last_pending_memory_flush; u; u = u->next ())
3072 add_dependence (insn, u->insn (), REG_DEP_ANTI);
3074 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3076 struct deps_reg *reg_last = &deps->reg_last[i];
3077 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3078 /* There's no point in making REG_DEP_CONTROL dependencies for
3079 debug insns. */
3080 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3081 false);
3083 if (!deps->readonly)
3084 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3086 CLEAR_REG_SET (reg_pending_uses);
3088 /* Quite often, a debug insn will refer to stuff in the
3089 previous instruction, but the reason we want this
3090 dependency here is to make sure the scheduler doesn't
3091 gratuitously move a debug insn ahead. This could dirty
3092 DF flags and cause additional analysis that wouldn't have
3093 occurred in compilation without debug insns, and such
3094 additional analysis can modify the generated code. */
3095 prev = PREV_INSN (insn);
3097 if (prev && NONDEBUG_INSN_P (prev))
3098 add_dependence (insn, prev, REG_DEP_ANTI);
3100 else
3102 regset_head set_or_clobbered;
3104 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3106 struct deps_reg *reg_last = &deps->reg_last[i];
3107 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3108 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3109 false);
3110 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3111 false);
3113 if (!deps->readonly)
3115 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3116 reg_last->uses_length++;
3120 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3121 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3123 struct deps_reg *reg_last = &deps->reg_last[i];
3124 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3125 add_dependence_list (insn, reg_last->implicit_sets, 0,
3126 REG_DEP_ANTI, false);
3127 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3128 false);
3130 if (!deps->readonly)
3132 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3133 reg_last->uses_length++;
3137 if (targetm.sched.exposed_pipeline)
3139 INIT_REG_SET (&set_or_clobbered);
3140 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3141 reg_pending_sets);
3142 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3144 struct deps_reg *reg_last = &deps->reg_last[i];
3145 rtx list;
3146 for (list = reg_last->uses; list; list = XEXP (list, 1))
3148 rtx other = XEXP (list, 0);
3149 if (INSN_CACHED_COND (other) != const_true_rtx
3150 && refers_to_regno_p (i, INSN_CACHED_COND (other)))
3151 INSN_CACHED_COND (other) = const_true_rtx;
3156 /* If the current insn is conditional, we can't free any
3157 of the lists. */
3158 if (sched_has_condition_p (insn))
3160 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3162 struct deps_reg *reg_last = &deps->reg_last[i];
3163 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3164 false);
3165 add_dependence_list (insn, reg_last->implicit_sets, 0,
3166 REG_DEP_ANTI, false);
3167 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3168 false);
3169 add_dependence_list (insn, reg_last->control_uses, 0,
3170 REG_DEP_CONTROL, false);
3172 if (!deps->readonly)
3174 reg_last->clobbers
3175 = alloc_INSN_LIST (insn, reg_last->clobbers);
3176 reg_last->clobbers_length++;
3179 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3181 struct deps_reg *reg_last = &deps->reg_last[i];
3182 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3183 false);
3184 add_dependence_list (insn, reg_last->implicit_sets, 0,
3185 REG_DEP_ANTI, false);
3186 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3187 false);
3188 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3189 false);
3190 add_dependence_list (insn, reg_last->control_uses, 0,
3191 REG_DEP_CONTROL, false);
3193 if (!deps->readonly)
3194 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3197 else
3199 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3201 struct deps_reg *reg_last = &deps->reg_last[i];
3202 if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
3203 || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
3205 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3206 REG_DEP_OUTPUT, false);
3207 add_dependence_list_and_free (deps, insn,
3208 &reg_last->implicit_sets, 0,
3209 REG_DEP_ANTI, false);
3210 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3211 REG_DEP_ANTI, false);
3212 add_dependence_list_and_free (deps, insn,
3213 &reg_last->control_uses, 0,
3214 REG_DEP_ANTI, false);
3215 add_dependence_list_and_free (deps, insn,
3216 &reg_last->clobbers, 0,
3217 REG_DEP_OUTPUT, false);
3219 if (!deps->readonly)
3221 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3222 reg_last->clobbers_length = 0;
3223 reg_last->uses_length = 0;
3226 else
3228 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3229 false);
3230 add_dependence_list (insn, reg_last->implicit_sets, 0,
3231 REG_DEP_ANTI, false);
3232 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3233 false);
3234 add_dependence_list (insn, reg_last->control_uses, 0,
3235 REG_DEP_CONTROL, false);
3238 if (!deps->readonly)
3240 reg_last->clobbers_length++;
3241 reg_last->clobbers
3242 = alloc_INSN_LIST (insn, reg_last->clobbers);
3245 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3247 struct deps_reg *reg_last = &deps->reg_last[i];
3249 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3250 REG_DEP_OUTPUT, false);
3251 add_dependence_list_and_free (deps, insn,
3252 &reg_last->implicit_sets,
3253 0, REG_DEP_ANTI, false);
3254 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3255 REG_DEP_OUTPUT, false);
3256 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3257 REG_DEP_ANTI, false);
3258 add_dependence_list (insn, reg_last->control_uses, 0,
3259 REG_DEP_CONTROL, false);
3261 if (!deps->readonly)
3263 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3264 reg_last->uses_length = 0;
3265 reg_last->clobbers_length = 0;
3269 if (!deps->readonly)
3271 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3273 struct deps_reg *reg_last = &deps->reg_last[i];
3274 reg_last->control_uses
3275 = alloc_INSN_LIST (insn, reg_last->control_uses);
3280 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3281 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3283 struct deps_reg *reg_last = &deps->reg_last[i];
3284 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3285 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3286 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3287 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3288 false);
3290 if (!deps->readonly)
3291 reg_last->implicit_sets
3292 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3295 if (!deps->readonly)
3297 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3298 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3299 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3300 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3301 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3302 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3303 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3305 /* Set up the pending barrier found. */
3306 deps->last_reg_pending_barrier = reg_pending_barrier;
3309 CLEAR_REG_SET (reg_pending_uses);
3310 CLEAR_REG_SET (reg_pending_clobbers);
3311 CLEAR_REG_SET (reg_pending_sets);
3312 CLEAR_REG_SET (reg_pending_control_uses);
3313 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3314 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3316 /* Add dependencies if a scheduling barrier was found. */
3317 if (reg_pending_barrier)
3319 /* In the case of a barrier, most of the added dependencies are
3320 not real, so we use anti-dependences here. */
3321 if (sched_has_condition_p (insn))
3323 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3325 struct deps_reg *reg_last = &deps->reg_last[i];
3326 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3327 true);
3328 add_dependence_list (insn, reg_last->sets, 0,
3329 reg_pending_barrier == TRUE_BARRIER
3330 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3331 add_dependence_list (insn, reg_last->implicit_sets, 0,
3332 REG_DEP_ANTI, true);
3333 add_dependence_list (insn, reg_last->clobbers, 0,
3334 reg_pending_barrier == TRUE_BARRIER
3335 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3338 else
3340 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3342 struct deps_reg *reg_last = &deps->reg_last[i];
3343 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3344 REG_DEP_ANTI, true);
3345 add_dependence_list_and_free (deps, insn,
3346 &reg_last->control_uses, 0,
3347 REG_DEP_CONTROL, true);
3348 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3349 reg_pending_barrier == TRUE_BARRIER
3350 ? REG_DEP_TRUE : REG_DEP_ANTI,
3351 true);
3352 add_dependence_list_and_free (deps, insn,
3353 &reg_last->implicit_sets, 0,
3354 REG_DEP_ANTI, true);
3355 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3356 reg_pending_barrier == TRUE_BARRIER
3357 ? REG_DEP_TRUE : REG_DEP_ANTI,
3358 true);
3360 if (!deps->readonly)
3362 reg_last->uses_length = 0;
3363 reg_last->clobbers_length = 0;
3368 if (!deps->readonly)
3369 for (i = 0; i < (unsigned)deps->max_reg; i++)
3371 struct deps_reg *reg_last = &deps->reg_last[i];
3372 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3373 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3376 /* Don't flush pending lists on speculative checks for
3377 selective scheduling. */
3378 if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
3379 flush_pending_lists (deps, insn, true, true);
3381 reg_pending_barrier = NOT_A_BARRIER;
3384 /* If a post-call group is still open, see if it should remain so.
3385 This insn must be a simple move of a hard reg to a pseudo or
3386 vice-versa.
3388 We must avoid moving these insns for correctness on targets
3389 with small register classes, and for special registers like
3390 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3391 hard regs for all targets. */
3393 if (deps->in_post_call_group_p)
3395 rtx tmp, set = single_set (insn);
3396 int src_regno, dest_regno;
3398 if (set == NULL)
3400 if (DEBUG_INSN_P (insn))
3401 /* We don't want to mark debug insns as part of the same
3402 sched group. We know they really aren't, but if we use
3403 debug insns to tell that a call group is over, we'll
3404 get different code if debug insns are not there and
3405 instructions that follow seem like they should be part
3406 of the call group.
3408 Also, if we did, chain_to_prev_insn would move the
3409 deps of the debug insn to the call insn, modifying
3410 non-debug post-dependency counts of the debug insn
3411 dependencies and otherwise messing with the scheduling
3412 order.
3414 Instead, let such debug insns be scheduled freely, but
3415 keep the call group open in case there are insns that
3416 should be part of it afterwards. Since we grant debug
3417 insns higher priority than even sched group insns, it
3418 will all turn out all right. */
3419 goto debug_dont_end_call_group;
3420 else
3421 goto end_call_group;
3424 tmp = SET_DEST (set);
3425 if (GET_CODE (tmp) == SUBREG)
3426 tmp = SUBREG_REG (tmp);
3427 if (REG_P (tmp))
3428 dest_regno = REGNO (tmp);
3429 else
3430 goto end_call_group;
3432 tmp = SET_SRC (set);
3433 if (GET_CODE (tmp) == SUBREG)
3434 tmp = SUBREG_REG (tmp);
3435 if ((GET_CODE (tmp) == PLUS
3436 || GET_CODE (tmp) == MINUS)
3437 && REG_P (XEXP (tmp, 0))
3438 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3439 && dest_regno == STACK_POINTER_REGNUM)
3440 src_regno = STACK_POINTER_REGNUM;
3441 else if (REG_P (tmp))
3442 src_regno = REGNO (tmp);
3443 else
3444 goto end_call_group;
3446 if (src_regno < FIRST_PSEUDO_REGISTER
3447 || dest_regno < FIRST_PSEUDO_REGISTER)
3449 if (!deps->readonly
3450 && deps->in_post_call_group_p == post_call_initial)
3451 deps->in_post_call_group_p = post_call;
3453 if (!sel_sched_p () || sched_emulate_haifa_p)
3455 SCHED_GROUP_P (insn) = 1;
3456 CANT_MOVE (insn) = 1;
3459 else
3461 end_call_group:
3462 if (!deps->readonly)
3463 deps->in_post_call_group_p = not_post_call;
3467 debug_dont_end_call_group:
3468 if ((current_sched_info->flags & DO_SPECULATION)
3469 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3470 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3471 be speculated. */
3473 if (sel_sched_p ())
3474 sel_mark_hard_insn (insn);
3475 else
3477 sd_iterator_def sd_it;
3478 dep_t dep;
3480 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3481 sd_iterator_cond (&sd_it, &dep);)
3482 change_spec_dep_to_hard (sd_it);
3486 /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
3487 honor their original ordering. */
3488 if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3490 if (deps->last_args_size)
3491 add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3492 deps->last_args_size = insn;
3496 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3497 longjmp, loop forever, ...). */
3498 /* FIXME: Why can't this function just use flags_from_decl_or_type and
3499 test for ECF_NORETURN? */
3500 static bool
3501 call_may_noreturn_p (rtx_insn *insn)
3503 rtx call;
3505 /* const or pure calls that aren't looping will always return. */
3506 if (RTL_CONST_OR_PURE_CALL_P (insn)
3507 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3508 return false;
3510 call = get_call_rtx_from (insn);
3511 if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3513 rtx symbol = XEXP (XEXP (call, 0), 0);
3514 if (SYMBOL_REF_DECL (symbol)
3515 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3517 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3518 == BUILT_IN_NORMAL)
3519 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3521 case BUILT_IN_BCMP:
3522 case BUILT_IN_BCOPY:
3523 case BUILT_IN_BZERO:
3524 case BUILT_IN_INDEX:
3525 case BUILT_IN_MEMCHR:
3526 case BUILT_IN_MEMCMP:
3527 case BUILT_IN_MEMCPY:
3528 case BUILT_IN_MEMMOVE:
3529 case BUILT_IN_MEMPCPY:
3530 case BUILT_IN_MEMSET:
3531 case BUILT_IN_RINDEX:
3532 case BUILT_IN_STPCPY:
3533 case BUILT_IN_STPNCPY:
3534 case BUILT_IN_STRCAT:
3535 case BUILT_IN_STRCHR:
3536 case BUILT_IN_STRCMP:
3537 case BUILT_IN_STRCPY:
3538 case BUILT_IN_STRCSPN:
3539 case BUILT_IN_STRLEN:
3540 case BUILT_IN_STRNCAT:
3541 case BUILT_IN_STRNCMP:
3542 case BUILT_IN_STRNCPY:
3543 case BUILT_IN_STRPBRK:
3544 case BUILT_IN_STRRCHR:
3545 case BUILT_IN_STRSPN:
3546 case BUILT_IN_STRSTR:
3547 /* Assume certain string/memory builtins always return. */
3548 return false;
3549 default:
3550 break;
3555 /* For all other calls assume that they might not always return. */
3556 return true;
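/* Editor's note: worked examples, not part of GCC.  A call to memcpy
   matches BUILT_IN_MEMCPY above and yields false; a call to an
   arbitrary extern function, or to exit or longjmp, falls through and
   yields true, so it is recorded in
   deps->last_function_call_may_noreturn and trapping insns cannot be
   moved across it.  */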
3559 /* Return true if INSN should be made dependent on the previous instruction
3560 group, and if all INSN's dependencies should be moved to the first
3561 instruction of that group. */
3563 static bool
3564 chain_to_prev_insn_p (rtx_insn *insn)
3566 /* INSN forms a group with the previous instruction. */
3567 if (SCHED_GROUP_P (insn))
3568 return true;
3570 /* If the previous instruction clobbers a register R and this one sets
3571 part of R, the clobber was added specifically to help us track the
3572 liveness of R. There's no point scheduling the clobber and leaving
3573 INSN behind, especially if we move the clobber to another block. */
3574 rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
3575 if (prev
3576 && INSN_P (prev)
3577 && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3578 && GET_CODE (PATTERN (prev)) == CLOBBER)
3580 rtx x = XEXP (PATTERN (prev), 0);
3581 if (set_of (x, insn))
3582 return true;
3585 return false;
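/* Editor's note: a hypothetical RTL example, not part of GCC, of the
   clobber/partial-set pairing tested above:

     (clobber (reg:DI 0))
     (set (subreg:SI (reg:DI 0) 0) ...)

   set_of finds reg 0 set by the second insn, so the pair stays
   together instead of the clobber being scheduled away.  */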
3588 /* Analyze INSN with DEPS as a context. */
3589 void
3590 deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
3592 if (sched_deps_info->start_insn)
3593 sched_deps_info->start_insn (insn);
3595 /* Record the condition for this insn. */
3596 if (NONDEBUG_INSN_P (insn))
3598 rtx t;
3599 sched_get_condition_with_rev (insn, NULL);
3600 t = INSN_CACHED_COND (insn);
3601 INSN_COND_DEPS (insn) = NULL;
3602 if (reload_completed
3603 && (current_sched_info->flags & DO_PREDICATION)
3604 && COMPARISON_P (t)
3605 && REG_P (XEXP (t, 0))
3606 && CONSTANT_P (XEXP (t, 1)))
3608 unsigned int regno;
3609 int nregs;
3610 rtx_insn_list *cond_deps = NULL;
3611 t = XEXP (t, 0);
3612 regno = REGNO (t);
3613 nregs = REG_NREGS (t);
3614 while (nregs-- > 0)
3616 struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3617 cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
3618 cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
3619 cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
3621 INSN_COND_DEPS (insn) = cond_deps;
3625 if (JUMP_P (insn))
3627 /* Make each JUMP_INSN (but not a speculative check)
3628 a scheduling barrier for memory references. */
3629 if (!deps->readonly
3630 && !(sel_sched_p ()
3631 && sel_insn_is_speculation_check (insn)))
3633 /* Keep the list a reasonable size. */
3634 if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
3635 flush_pending_lists (deps, insn, true, true);
3636 else
3637 deps->pending_jump_insns
3638 = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3641 /* For each insn which shouldn't cross a jump, add a dependence. */
3642 add_dependence_list_and_free (deps, insn,
3643 &deps->sched_before_next_jump, 1,
3644 REG_DEP_ANTI, true);
3646 sched_analyze_insn (deps, PATTERN (insn), insn);
3648 else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3650 sched_analyze_insn (deps, PATTERN (insn), insn);
3652 else if (CALL_P (insn))
3654 int i;
3656 CANT_MOVE (insn) = 1;
3658 if (find_reg_note (insn, REG_SETJMP, NULL))
3660 /* This is setjmp. Assume that all registers, not just
3661 hard registers, may be clobbered by this call. */
3662 reg_pending_barrier = MOVE_BARRIER;
3664 else
3666 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3667 /* A call may read and modify global register variables. */
3668 if (global_regs[i])
3670 SET_REGNO_REG_SET (reg_pending_sets, i);
3671 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3673 /* Other call-clobbered hard regs may be clobbered.
3674 Since we only have a choice between 'might be clobbered'
3675 and 'definitely not clobbered', we must include all
3676 partly call-clobbered registers here. */
3677 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3678 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3679 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3680 /* We don't know what set of fixed registers might be used
3681 by the function, but it is certain that the stack pointer
3682 is among them; be conservative about the rest. */
3683 else if (fixed_regs[i])
3684 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3685 /* The frame pointer is normally not used by the function
3686 itself, but by the debugger. */
3687 /* ??? MIPS o32 is an exception. It uses the frame pointer
3688 in the macro expansion of jal but does not represent this
3689 fact in the call_insn rtl. */
3690 else if (i == FRAME_POINTER_REGNUM
3691 || (i == HARD_FRAME_POINTER_REGNUM
3692 && (! reload_completed || frame_pointer_needed)))
3693 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3696 /* For each insn which shouldn't cross a call, add a dependence
3697 between that insn and this call insn. */
3698 add_dependence_list_and_free (deps, insn,
3699 &deps->sched_before_next_call, 1,
3700 REG_DEP_ANTI, true);
3702 sched_analyze_insn (deps, PATTERN (insn), insn);
3704 /* If this CALL were in a sched group, it would violate the
3705 convention that sched group insns have dependencies only on the
3706 previous instruction.
3708 Of course one can say: "Hey! What about the head of the sched group?"
3709 And I will answer: "Basic principles (one dep per insn) are always
3710 the same." */
3711 gcc_assert (!SCHED_GROUP_P (insn));
3713 /* In the absence of interprocedural alias analysis, we must flush
3714 all pending reads and writes, and start new dependencies from
3715 here. But only flush writes for constant calls (which may
3716 be passed a pointer to something we haven't written yet). */
3717 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3719 if (!deps->readonly)
3721 /* Remember the last function call for limiting lifetimes. */
3722 free_INSN_LIST_list (&deps->last_function_call);
3723 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3725 if (call_may_noreturn_p (insn))
3727 /* Remember the last function call that might not always return
3728 normally for limiting moves of trapping insns. */
3729 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3730 deps->last_function_call_may_noreturn
3731 = alloc_INSN_LIST (insn, NULL_RTX);
3734 /* Before reload, begin a post-call group, so as to keep the
3735 lifetimes of hard registers correct. */
3736 if (! reload_completed)
3737 deps->in_post_call_group_p = post_call;
3741 if (sched_deps_info->use_cselib)
3742 cselib_process_insn (insn);
3744 if (sched_deps_info->finish_insn)
3745 sched_deps_info->finish_insn ();
3747 /* Fixup the dependencies in the sched group. */
3748 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3749 && chain_to_prev_insn_p (insn)
3750 && !sel_sched_p ())
3751 chain_to_prev_insn (insn);
3754 /* Initialize DEPS for the new block beginning with HEAD. */
3755 void
3756 deps_start_bb (struct deps_desc *deps, rtx_insn *head)
3758 gcc_assert (!deps->readonly);
3760 /* Before reload, if the previous block ended in a call, show that
3761 we are inside a post-call group, so as to keep the lifetimes of
3762 hard registers correct. */
3763 if (! reload_completed && !LABEL_P (head))
3765 rtx_insn *insn = prev_nonnote_nondebug_insn (head);
3767 if (insn && CALL_P (insn))
3768 deps->in_post_call_group_p = post_call_initial;
3772 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3773 dependencies for each insn. */
3774 void
3775 sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
3777 rtx_insn *insn;
3779 if (sched_deps_info->use_cselib)
3780 cselib_init (CSELIB_RECORD_MEMORY);
3782 deps_start_bb (deps, head);
3784 for (insn = head;; insn = NEXT_INSN (insn))
3787 if (INSN_P (insn))
3789 /* And initialize deps_lists. */
3790 sd_init_insn (insn);
3791 /* Clean up SCHED_GROUP_P which may be set by last
3792 scheduler pass. */
3793 if (SCHED_GROUP_P (insn))
3794 SCHED_GROUP_P (insn) = 0;
3797 deps_analyze_insn (deps, insn);
3799 if (insn == tail)
3801 if (sched_deps_info->use_cselib)
3802 cselib_finish ();
3803 return;
3806 gcc_unreachable ();
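/* Editor's note: a minimal sketch, not part of GCC, of the typical
   life cycle of a dependence context around the entry point above;
   "analyze_block_deps" is a hypothetical wrapper.  */
#if 0
static void
analyze_block_deps (rtx_insn *head, rtx_insn *tail)
{
  struct deps_desc deps;

  init_deps (&deps, false);		/* Allocate reg_last eagerly.  */
  sched_analyze (&deps, head, tail);	/* Build backward dependencies.  */
  free_deps (&deps);			/* Release all pending lists.  */
}
#endif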
3809 /* Helper for sched_free_deps ().
3810 Delete INSN's (RESOLVED_P) backward dependencies. */
3811 static void
3812 delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
3814 sd_iterator_def sd_it;
3815 dep_t dep;
3816 sd_list_types_def types;
3818 if (resolved_p)
3819 types = SD_LIST_RES_BACK;
3820 else
3821 types = SD_LIST_BACK;
3823 for (sd_it = sd_iterator_start (insn, types);
3824 sd_iterator_cond (&sd_it, &dep);)
3826 dep_link_t link = *sd_it.linkp;
3827 dep_node_t node = DEP_LINK_NODE (link);
3828 deps_list_t back_list;
3829 deps_list_t forw_list;
3831 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3832 remove_from_deps_list (link, back_list);
3833 delete_dep_node (node);
3837 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3838 deps_lists. */
3839 void
3840 sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
3842 rtx_insn *insn;
3843 rtx_insn *next_tail = NEXT_INSN (tail);
3845 /* We make two passes since some insns may be scheduled before their
3846 dependencies are resolved. */
3847 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3848 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3850 /* Clear forward deps and leave the dep_nodes to the
3851 corresponding back_deps list. */
3852 if (resolved_p)
3853 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3854 else
3855 clear_deps_list (INSN_FORW_DEPS (insn));
3857 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3858 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3860 /* Clear resolved back deps together with its dep_nodes. */
3861 delete_dep_nodes_in_back_deps (insn, resolved_p);
3863 sd_finish_insn (insn);
3867 /* Initialize variables for region data dependence analysis.
3868 When LAZY_REG_LAST is true, do not allocate reg_last array
3869 of struct deps_desc immediately. */
3871 void
3872 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3874 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3876 deps->max_reg = max_reg;
3877 if (lazy_reg_last)
3878 deps->reg_last = NULL;
3879 else
3880 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3881 INIT_REG_SET (&deps->reg_last_in_use);
3883 deps->pending_read_insns = 0;
3884 deps->pending_read_mems = 0;
3885 deps->pending_write_insns = 0;
3886 deps->pending_write_mems = 0;
3887 deps->pending_jump_insns = 0;
3888 deps->pending_read_list_length = 0;
3889 deps->pending_write_list_length = 0;
3890 deps->pending_flush_length = 0;
3891 deps->last_pending_memory_flush = 0;
3892 deps->last_function_call = 0;
3893 deps->last_function_call_may_noreturn = 0;
3894 deps->sched_before_next_call = 0;
3895 deps->sched_before_next_jump = 0;
3896 deps->in_post_call_group_p = not_post_call;
3897 deps->last_debug_insn = 0;
3898 deps->last_args_size = 0;
3899 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3900 deps->readonly = 0;
3903 /* Initialize only the reg_last field of DEPS, which was not allocated
3904 before because we initialized DEPS lazily. */
3905 void
3906 init_deps_reg_last (struct deps_desc *deps)
3908 gcc_assert (deps && deps->max_reg > 0);
3909 gcc_assert (deps->reg_last == NULL);
3911 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3915 /* Free insn lists found in DEPS. */
3917 void
3918 free_deps (struct deps_desc *deps)
3920 unsigned i;
3921 reg_set_iterator rsi;
3923 /* We set max_reg to 0 when this context was already freed. */
3924 if (deps->max_reg == 0)
3926 gcc_assert (deps->reg_last == NULL);
3927 return;
3929 deps->max_reg = 0;
3931 free_INSN_LIST_list (&deps->pending_read_insns);
3932 free_EXPR_LIST_list (&deps->pending_read_mems);
3933 free_INSN_LIST_list (&deps->pending_write_insns);
3934 free_EXPR_LIST_list (&deps->pending_write_mems);
3935 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3937 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3938 times. For a testcase with 42000 regs and 8000 small basic blocks,
3939 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3940 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3942 struct deps_reg *reg_last = &deps->reg_last[i];
3943 if (reg_last->uses)
3944 free_INSN_LIST_list (&reg_last->uses);
3945 if (reg_last->sets)
3946 free_INSN_LIST_list (&reg_last->sets);
3947 if (reg_last->implicit_sets)
3948 free_INSN_LIST_list (&reg_last->implicit_sets);
3949 if (reg_last->control_uses)
3950 free_INSN_LIST_list (&reg_last->control_uses);
3951 if (reg_last->clobbers)
3952 free_INSN_LIST_list (&reg_last->clobbers);
3954 CLEAR_REG_SET (&deps->reg_last_in_use);
3956 /* As we initialize reg_last lazily, it is possible that we didn't allocate
3957 it at all. */
3958 free (deps->reg_last);
3959 deps->reg_last = NULL;
3961 deps = NULL;
3964 /* Remove INSN from the dependence context DEPS. */
3965 void
3966 remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
3968 int removed;
3969 unsigned i;
3970 reg_set_iterator rsi;
3972 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3973 &deps->pending_read_mems);
3974 if (!DEBUG_INSN_P (insn))
3975 deps->pending_read_list_length -= removed;
3976 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3977 &deps->pending_write_mems);
3978 deps->pending_write_list_length -= removed;
3980 removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
3981 deps->pending_flush_length -= removed;
3982 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3983 deps->pending_flush_length -= removed;
3985 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3986 {
3987 struct deps_reg *reg_last = &deps->reg_last[i];
3988 if (reg_last->uses)
3989 remove_from_dependence_list (insn, &reg_last->uses);
3990 if (reg_last->sets)
3991 remove_from_dependence_list (insn, &reg_last->sets);
3992 if (reg_last->implicit_sets)
3993 remove_from_dependence_list (insn, &reg_last->implicit_sets);
3994 if (reg_last->clobbers)
3995 remove_from_dependence_list (insn, &reg_last->clobbers);
3996 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3997 && !reg_last->clobbers)
3998 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
3999 }
4001 if (CALL_P (insn))
4002 {
4003 remove_from_dependence_list (insn, &deps->last_function_call);
4004 remove_from_dependence_list (insn,
4005 &deps->last_function_call_may_noreturn);
4006 }
4007 remove_from_dependence_list (insn, &deps->sched_before_next_call);
4010 /* Init deps data vector. */
4011 static void
4012 init_deps_data_vector (void)
4014 int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
4015 if (reserve > 0 && ! h_d_i_d.space (reserve))
4016 h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
4019 /* If it is profitable to use them, initialize or extend (depending on
4020 GLOBAL_P) dependency data. */
4021 void
4022 sched_deps_init (bool global_p)
4024 /* Average number of insns in the basic block.
4025 '+ 1' is used to make it nonzero. */
4026 int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
4028 init_deps_data_vector ();
4030 /* We use another caching mechanism for selective scheduling, so
4031 we don't use this one. */
4032 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
4033 {
4034 /* ?!? We could save some memory by computing a per-region luid mapping
4035 which could reduce both the number of vectors in the cache and the
4036 size of each vector. Instead we just avoid the cache entirely unless
4037 the average number of instructions in a basic block is very high. See
4038 the comment before the declaration of true_dependency_cache for
4039 what we consider "very high". */
4040 cache_size = 0;
4041 extend_dependency_caches (sched_max_luid, true);
4042 }
4044 if (global_p)
4045 {
4046 dl_pool = new object_allocator<_deps_list> ("deps_list");
4047 /* Allocate lists for one block at a time. */
4048 dn_pool = new object_allocator<_dep_node> ("dep_node");
4049 /* Allocate nodes for one block at a time. */
4050 }
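/* Worked example of the heuristic above, with made-up numbers: for
   sched_max_luid == 60000 and 100 basic blocks, insns_in_block is
   60000 / 100 + 1 == 601 > 100 * 5, so the luid-indexed caches are
   created; with 8000 basic blocks the average drops to 8 and the
   caches are skipped entirely.  */
#if 0
int insns_in_block_ex = 60000 / 100 + 1;          /* == 601 */
bool use_caches_ex = insns_in_block_ex > 100 * 5; /* true */
#endif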
4054 /* Create or extend (depending on CREATE_P) dependency caches to
4055 size N. */
4056 void
4057 extend_dependency_caches (int n, bool create_p)
4059 if (create_p || true_dependency_cache)
4060 {
4061 int i, luid = cache_size + n;
4063 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4064 luid);
4065 output_dependency_cache = XRESIZEVEC (bitmap_head,
4066 output_dependency_cache, luid);
4067 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4068 luid);
4069 control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4070 luid);
4072 if (current_sched_info->flags & DO_SPECULATION)
4073 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4074 luid);
4076 for (i = cache_size; i < luid; i++)
4077 {
4078 bitmap_initialize (&true_dependency_cache[i], 0);
4079 bitmap_initialize (&output_dependency_cache[i], 0);
4080 bitmap_initialize (&anti_dependency_cache[i], 0);
4081 bitmap_initialize (&control_dependency_cache[i], 0);
4083 if (current_sched_info->flags & DO_SPECULATION)
4084 bitmap_initialize (&spec_dependency_cache[i], 0);
4085 }
4086 cache_size = luid;
4087 }
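/* A simplified sketch of how these caches are consulted: each cache is
   an array of bitmaps indexed by the consumer's luid, and a set bit
   marks the luid of a producer it depends on.  (The real lookup in this
   file also deals with the speculative cache; this is an illustration
   under those assumptions, not the actual query routine.)  */
#if 0
static bool
cached_true_dep_p (rtx_insn *con, rtx_insn *pro)
{
  return bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
		       INSN_LUID (pro));
}
#endif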
4090 /* Finalize dependency information for the whole function. */
4091 void
4092 sched_deps_finish (void)
4094 gcc_assert (deps_pools_are_empty_p ());
4095 delete dn_pool;
4096 delete dl_pool;
4097 dn_pool = NULL;
4098 dl_pool = NULL;
4100 h_d_i_d.release ();
4101 cache_size = 0;
4103 if (true_dependency_cache)
4104 {
4105 int i;
4107 for (i = 0; i < cache_size; i++)
4108 {
4109 bitmap_clear (&true_dependency_cache[i]);
4110 bitmap_clear (&output_dependency_cache[i]);
4111 bitmap_clear (&anti_dependency_cache[i]);
4112 bitmap_clear (&control_dependency_cache[i]);
4114 if (sched_deps_info->generate_spec_deps)
4115 bitmap_clear (&spec_dependency_cache[i]);
4116 }
4117 free (true_dependency_cache);
4118 true_dependency_cache = NULL;
4119 free (output_dependency_cache);
4120 output_dependency_cache = NULL;
4121 free (anti_dependency_cache);
4122 anti_dependency_cache = NULL;
4123 free (control_dependency_cache);
4124 control_dependency_cache = NULL;
4126 if (sched_deps_info->generate_spec_deps)
4127 {
4128 free (spec_dependency_cache);
4129 spec_dependency_cache = NULL;
4130 }
4131 }
4135 /* Initialize some global variables needed by the dependency analysis
4136 code. */
4138 void
4139 init_deps_global (void)
4141 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4142 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
4143 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4144 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4145 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4146 reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4147 reg_pending_barrier = NOT_A_BARRIER;
4149 if (!sel_sched_p () || sched_emulate_haifa_p)
4150 {
4151 sched_deps_info->start_insn = haifa_start_insn;
4152 sched_deps_info->finish_insn = haifa_finish_insn;
4154 sched_deps_info->note_reg_set = haifa_note_reg_set;
4155 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4156 sched_deps_info->note_reg_use = haifa_note_reg_use;
4158 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4159 sched_deps_info->note_dep = haifa_note_dep;
4160 }
4163 /* Free everything used by the dependency analysis code. */
4165 void
4166 finish_deps_global (void)
4168 FREE_REG_SET (reg_pending_sets);
4169 FREE_REG_SET (reg_pending_clobbers);
4170 FREE_REG_SET (reg_pending_uses);
4171 FREE_REG_SET (reg_pending_control_uses);
4174 /* Estimate the weakness of dependence between MEM1 and MEM2. */
4175 dw_t
4176 estimate_dep_weak (rtx mem1, rtx mem2)
4178 rtx r1, r2;
4180 if (mem1 == mem2)
4181 /* MEMs are the same - don't speculate. */
4182 return MIN_DEP_WEAK;
4184 r1 = XEXP (mem1, 0);
4185 r2 = XEXP (mem2, 0);
4187 if (r1 == r2
4188 || (REG_P (r1) && REG_P (r2)
4189 && REGNO (r1) == REGNO (r2)))
4190 /* Again, MEMs are the same. */
4191 return MIN_DEP_WEAK;
4192 else if ((REG_P (r1) && !REG_P (r2))
4193 || (!REG_P (r1) && REG_P (r2)))
4194 /* Different addressing modes - reason to be more speculative
4195 than usual. */
4196 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4197 else
4198 /* We can't say anything about the dependence. */
4199 return UNCERTAIN_DEP_WEAK;
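/* Worked example, assuming the usual weakness constants from
   sched-int.h (MIN_DEP_WEAK == 1, UNCERTAIN_DEP_WEAK == 192,
   MAX_DEP_WEAK == 255, NO_DEP_WEAK == 256 -- treat these numbers as
   assumptions):

     (mem (reg 1)) vs (mem (reg 1))           -> 1, same location
     (mem (reg 1)) vs (mem (plus (reg 2) 4))  -> 256 - (256 - 192) / 2
                                                 == 224, different modes
     (mem (reg 1)) vs (mem (reg 2))           -> 192, no information  */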
4202 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4203 This function can handle the case when INSN and ELEM are the same (INSN == ELEM).
4204 It is a convenience wrapper. */
4205 static void
4206 add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
4208 ds_t ds;
4209 bool internal;
4211 if (dep_type == REG_DEP_TRUE)
4212 ds = DEP_TRUE;
4213 else if (dep_type == REG_DEP_OUTPUT)
4214 ds = DEP_OUTPUT;
4215 else if (dep_type == REG_DEP_CONTROL)
4216 ds = DEP_CONTROL;
4217 else
4218 {
4219 gcc_assert (dep_type == REG_DEP_ANTI);
4220 ds = DEP_ANTI;
4221 }
4223 /* When add_dependence is called from inside sched-deps.c, we expect
4224 cur_insn to be non-null. */
4225 internal = cur_insn != NULL;
4226 if (internal)
4227 gcc_assert (insn == cur_insn);
4228 else
4229 cur_insn = insn;
4231 note_dep (elem, ds);
4232 if (!internal)
4233 cur_insn = NULL;
4236 /* Return weakness of speculative type TYPE in the dep_status DS.
4237 This variant performs no consistency checks, so it will not ICE on
malformed input; dump_ds relies on that. */
4238 static dw_t
4239 get_dep_weak_1 (ds_t ds, ds_t type)
4241 ds = ds & type;
4243 switch (type)
4244 {
4245 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4246 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4247 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4248 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4249 default: gcc_unreachable ();
4250 }
4252 return (dw_t) ds;
4255 /* Return weakness of speculative type TYPE in the dep_status DS. */
4256 dw_t
4257 get_dep_weak (ds_t ds, ds_t type)
4259 dw_t dw = get_dep_weak_1 (ds, type);
4261 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4262 return dw;
4265 /* Return the dep_status, which has the same parameters as DS, except for
4266 speculative type TYPE, that will have weakness DW. */
4267 ds_t
4268 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4270 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4272 ds &= ~type;
4273 switch (type)
4274 {
4275 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4276 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4277 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4278 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4279 default: gcc_unreachable ();
4280 }
4281 return ds;
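/* A minimal round-trip sketch of the packing performed by the pair of
   functions above: each speculative type owns a dedicated bit range
   inside the ds_t, so a weakness is stored by shifting it into that
   range and read back by masking and shifting down.  The weakness
   value 100 is arbitrary.  */
#if 0
ds_t s = 0;
s = set_dep_weak (s, BEGIN_DATA, 100);  /* 100 << BEGIN_DATA_BITS_OFFSET */
gcc_assert (get_dep_weak (s, BEGIN_DATA) == 100);
#endif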
4284 /* Return the join of two dep_statuses DS1 and DS2.
4285 If MAX_P is true then choose the greater probability,
4286 otherwise multiply probabilities.
4287 This function assumes that both DS1 and DS2 contain speculative bits. */
4288 static ds_t
4289 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4291 ds_t ds, t;
4293 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4295 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4297 t = FIRST_SPEC_TYPE;
4298 do
4299 {
4300 if ((ds1 & t) && !(ds2 & t))
4301 ds |= ds1 & t;
4302 else if (!(ds1 & t) && (ds2 & t))
4303 ds |= ds2 & t;
4304 else if ((ds1 & t) && (ds2 & t))
4305 {
4306 dw_t dw1 = get_dep_weak (ds1, t);
4307 dw_t dw2 = get_dep_weak (ds2, t);
4308 ds_t dw;
4310 if (!max_p)
4311 {
4312 dw = ((ds_t) dw1) * ((ds_t) dw2);
4313 dw /= MAX_DEP_WEAK;
4314 if (dw < MIN_DEP_WEAK)
4315 dw = MIN_DEP_WEAK;
4316 }
4317 else
4318 {
4319 if (dw1 >= dw2)
4320 dw = dw1;
4321 else
4322 dw = dw2;
4323 }
4325 ds = set_dep_weak (ds, t, (dw_t) dw);
4326 }
4328 if (t == LAST_SPEC_TYPE)
4329 break;
4330 t <<= SPEC_TYPE_SHIFT;
4331 }
4332 while (1);
4334 return ds;
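/* Worked example for the !MAX_P path, assuming the 1..255 fixed-point
   weakness scale: if DS1 and DS2 both carry BEGIN_DATA weakness 128
   (roughly a 50% chance of successful speculation), the merged
   weakness is 128 * 128 / 255 == 64 -- two independent ~50% chances
   combine to ~25%.  With MAX_P the result would simply be 128.  */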
4337 /* Return the join of two dep_statuses DS1 and DS2.
4338 This function assumes that both DS1 and DS2 contain speculative bits. */
4339 ds_t
4340 ds_merge (ds_t ds1, ds_t ds2)
4342 return ds_merge_1 (ds1, ds2, false);
4345 /* Return the join of two dep_statuses DS1 and DS2. */
4346 ds_t
4347 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4349 ds_t new_status = ds | ds2;
4351 if (new_status & SPECULATIVE)
4352 {
4353 if ((ds && !(ds & SPECULATIVE))
4354 || (ds2 && !(ds2 & SPECULATIVE)))
4355 /* Then this dep can't be speculative. */
4356 new_status &= ~SPECULATIVE;
4357 else
4358 {
4359 /* Both are speculative.  Merge the probabilities. */
4360 if (mem1)
4361 {
4362 dw_t dw;
4364 dw = estimate_dep_weak (mem1, mem2);
4365 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4366 }
4368 if (!ds)
4369 new_status = ds2;
4370 else if (!ds2)
4371 new_status = ds;
4372 else
4373 new_status = ds_merge (ds2, ds);
4374 }
4375 }
4377 return new_status;
4380 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
4381 probabilities. */
4382 ds_t
4383 ds_max_merge (ds_t ds1, ds_t ds2)
4385 if (ds1 == 0 && ds2 == 0)
4386 return 0;
4388 if (ds1 == 0 && ds2 != 0)
4389 return ds2;
4391 if (ds1 != 0 && ds2 == 0)
4392 return ds1;
4394 return ds_merge_1 (ds1, ds2, true);
4397 /* Return the probability of speculation success for the speculation
4398 status DS. */
4399 dw_t
4400 ds_weak (ds_t ds)
4402 ds_t res = 1, dt;
4403 int n = 0;
4405 dt = FIRST_SPEC_TYPE;
4406 do
4407 {
4408 if (ds & dt)
4409 {
4410 res *= (ds_t) get_dep_weak (ds, dt);
4411 n++;
4412 }
4414 if (dt == LAST_SPEC_TYPE)
4415 break;
4416 dt <<= SPEC_TYPE_SHIFT;
4417 }
4418 while (1);
4420 gcc_assert (n);
4421 while (--n)
4422 res /= MAX_DEP_WEAK;
4424 if (res < MIN_DEP_WEAK)
4425 res = MIN_DEP_WEAK;
4427 gcc_assert (res <= MAX_DEP_WEAK);
4429 return (dw_t) res;
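/* Worked example, again assuming the 1..255 scale: for a status with
   BEGIN_DATA weakness 128 and BEGIN_CONTROL weakness 192, n ends up
   as 2 and res == 128 * 192 / 255 == 96; the per-type success
   probabilities are treated as independent and multiplied, staying in
   the same fixed-point scale.  */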
4432 /* Return a dep status that contains all speculation types of DS. */
4433 ds_t
4434 ds_get_speculation_types (ds_t ds)
4436 if (ds & BEGIN_DATA)
4437 ds |= BEGIN_DATA;
4438 if (ds & BE_IN_DATA)
4439 ds |= BE_IN_DATA;
4440 if (ds & BEGIN_CONTROL)
4441 ds |= BEGIN_CONTROL;
4442 if (ds & BE_IN_CONTROL)
4443 ds |= BE_IN_CONTROL;
4445 return ds & SPECULATIVE;
4448 /* Return a dep status that contains maximal weakness for each speculation
4449 type present in DS. */
4450 ds_t
4451 ds_get_max_dep_weak (ds_t ds)
4453 if (ds & BEGIN_DATA)
4454 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4455 if (ds & BE_IN_DATA)
4456 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4457 if (ds & BEGIN_CONTROL)
4458 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4459 if (ds & BE_IN_CONTROL)
4460 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4462 return ds;
4465 /* Dump information about the dependence status S. */
4466 static void
4467 dump_ds (FILE *f, ds_t s)
4469 fprintf (f, "{");
4471 if (s & BEGIN_DATA)
4472 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4473 if (s & BE_IN_DATA)
4474 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4475 if (s & BEGIN_CONTROL)
4476 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4477 if (s & BE_IN_CONTROL)
4478 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4480 if (s & HARD_DEP)
4481 fprintf (f, "HARD_DEP; ");
4483 if (s & DEP_TRUE)
4484 fprintf (f, "DEP_TRUE; ");
4485 if (s & DEP_OUTPUT)
4486 fprintf (f, "DEP_OUTPUT; ");
4487 if (s & DEP_ANTI)
4488 fprintf (f, "DEP_ANTI; ");
4489 if (s & DEP_CONTROL)
4490 fprintf (f, "DEP_CONTROL; ");
4492 fprintf (f, "}");
4495 DEBUG_FUNCTION void
4496 debug_ds (ds_t s)
4498 dump_ds (stderr, s);
4499 fprintf (stderr, "\n");
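/* For example, a data-speculative true dependence whose BEGIN_DATA
   weakness happens to be 128 (an arbitrary value) would dump as:

     {BEGIN_DATA: 128; DEP_TRUE; }  */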
4502 /* Verify that dependence type and status are consistent.
4503 If RELAXED_P is true, then skip dep_weakness checks. */
4504 static void
4505 check_dep (dep_t dep, bool relaxed_p)
4507 enum reg_note dt = DEP_TYPE (dep);
4508 ds_t ds = DEP_STATUS (dep);
4510 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4512 if (!(current_sched_info->flags & USE_DEPS_LIST))
4514 gcc_assert (ds == 0);
4515 return;
4518 /* Check that dependence type contains the same bits as the status. */
4519 if (dt == REG_DEP_TRUE)
4520 gcc_assert (ds & DEP_TRUE);
4521 else if (dt == REG_DEP_OUTPUT)
4522 gcc_assert ((ds & DEP_OUTPUT)
4523 && !(ds & DEP_TRUE));
4524 else if (dt == REG_DEP_ANTI)
4525 gcc_assert ((ds & DEP_ANTI)
4526 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4527 else
4528 gcc_assert (dt == REG_DEP_CONTROL
4529 && (ds & DEP_CONTROL)
4530 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4532 /* HARD_DEP cannot appear in the dep_status of a link. */
4533 gcc_assert (!(ds & HARD_DEP));
4535 /* Check that dependence status is set correctly when speculation is not
4536 supported. */
4537 if (!sched_deps_info->generate_spec_deps)
4538 gcc_assert (!(ds & SPECULATIVE));
4539 else if (ds & SPECULATIVE)
4540 {
4541 if (!relaxed_p)
4542 {
4543 ds_t type = FIRST_SPEC_TYPE;
4545 /* Check that dependence weakness is in proper range. */
4546 do
4547 {
4548 if (ds & type)
4549 get_dep_weak (ds, type);
4551 if (type == LAST_SPEC_TYPE)
4552 break;
4553 type <<= SPEC_TYPE_SHIFT;
4554 }
4555 while (1);
4556 }
4558 if (ds & BEGIN_SPEC)
4559 {
4560 /* Only true dependence can be data speculative. */
4561 if (ds & BEGIN_DATA)
4562 gcc_assert (ds & DEP_TRUE);
4564 /* Control dependencies in the insn scheduler are represented by
4565 anti-dependencies, therefore only anti dependence can be
4566 control speculative. */
4567 if (ds & BEGIN_CONTROL)
4568 gcc_assert (ds & DEP_ANTI);
4569 }
4570 else
4571 {
4572 /* Subsequent speculations should resolve true dependencies. */
4573 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4574 }
4576 /* Check that true and anti dependencies can't have other speculative
4577 statuses. */
4578 if (ds & DEP_TRUE)
4579 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4580 /* An output dependence can't be speculative at all. */
4581 gcc_assert (!(ds & DEP_OUTPUT));
4582 if (ds & DEP_ANTI)
4583 gcc_assert (ds & BEGIN_CONTROL);
4584 }
4587 /* The following code discovers opportunities to switch a memory reference
4588 and an increment by modifying the address. We ensure that this is done
4589 only for dependencies that represent just a single register
4590 dependence (tracked via DEP_NONREG and DEP_MULTIPLE), and that every
4591 memory instruction involved is subject to at most one dependence that
4592 can cause a pattern change.
4594 When we discover a suitable dependency, we fill in the dep_replacement
4595 structure to show how to modify the memory reference. */
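/* For example (illustrative pseudo-RTL, not taken from a real dump),
   given

     inc: r1 = r0 + 4
     mem: ... (mem (plus r1 8)) ...

   rewriting the memory reference as (mem (plus r0 12)) removes the
   register dependence of the mem insn on the increment, leaving the
   scheduler free to interchange the two instructions.  */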
4597 /* Holds information about a pair of memory reference and register increment
4598 insns which depend on each other, but could possibly be interchanged. */
4599 struct mem_inc_info
4601 rtx_insn *inc_insn;
4602 rtx_insn *mem_insn;
4604 rtx *mem_loc;
4605 /* A register occurring in the memory address for which we wish to break
4606 the dependence. This must be identical to the destination register of
4607 the increment. */
4608 rtx mem_reg0;
4609 /* Any kind of index that is added to that register. */
4610 rtx mem_index;
4611 /* The constant offset used in the memory address. */
4612 HOST_WIDE_INT mem_constant;
4613 /* The constant added in the increment insn. Negated if the increment is
4614 after the memory address. */
4615 HOST_WIDE_INT inc_constant;
4616 /* The source register used in the increment. May be different from mem_reg0
4617 if the increment occurs before the memory address. */
4618 rtx inc_input;
4621 /* Verify that the memory location described in MII can be replaced with
4622 one using NEW_ADDR. Return the new memory reference or NULL_RTX. The
4623 insn remains unchanged by this function. */
4625 static rtx
4626 attempt_change (struct mem_inc_info *mii, rtx new_addr)
4628 rtx mem = *mii->mem_loc;
4629 rtx new_mem;
4631 /* Jump through a lot of hoops to keep the attributes up to date. We
4632 do not want to call one of the change address variants that take
4633 an offset even though we know the offset in many cases. These
4634 assume you are changing where the address is pointing by the
4635 offset. */
4636 new_mem = replace_equiv_address_nv (mem, new_addr);
4637 if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4638 {
4639 if (sched_verbose >= 5)
4640 fprintf (sched_dump, "validation failure\n");
4641 return NULL_RTX;
4642 }
4644 /* Put back the old one. */
4645 validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4647 return new_mem;
4650 /* Return true if INSN is of the form "a = b + c" where a and b are
4651 regs and c is a const_int (only a constant addend is handled below).
4652 Fill in information in MII about what is found.
4653 BEFORE_MEM indicates whether the increment is found before or after
4654 a corresponding memory reference. */
4656 static bool
4657 parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
4659 rtx pat = single_set (insn);
4660 rtx src, cst;
4661 bool regs_equal;
4663 if (RTX_FRAME_RELATED_P (insn) || !pat)
4664 return false;
4666 /* Result must be single reg. */
4667 if (!REG_P (SET_DEST (pat)))
4668 return false;
4670 if (GET_CODE (SET_SRC (pat)) != PLUS)
4671 return false;
4673 mii->inc_insn = insn;
4674 src = SET_SRC (pat);
4675 mii->inc_input = XEXP (src, 0);
4677 if (!REG_P (XEXP (src, 0)))
4678 return false;
4680 if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4681 return false;
4683 cst = XEXP (src, 1);
4684 if (!CONST_INT_P (cst))
4685 return false;
4686 mii->inc_constant = INTVAL (cst);
4688 regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4690 if (!before_mem)
4691 {
4692 mii->inc_constant = -mii->inc_constant;
4693 if (!regs_equal)
4694 return false;
4695 }
4697 if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
4698 {
4699 /* Note that the sign has already been reversed for !before_mem. */
4700 if (STACK_GROWS_DOWNWARD)
4701 return mii->inc_constant > 0;
4702 else
4703 return mii->inc_constant < 0;
4704 }
4705 return true;
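/* Illustrative forms for the matcher above (register numbers made up):

     r1 = r1 + 8    accepted before or after the mem insn
                    (inc_constant 8, negated to -8 in the latter case);
     r1 = r0 + 8    accepted only before the mem insn
                    (regs_equal is false);
     r1 = r1 + r2   rejected: the addend must be a CONST_INT;
     sp = sp - 8    with a downward-growing stack, rejected when found
                    before the mem insn: breaking that dependence would
                    let the memory access move above the allocation and
                    touch memory below the stack pointer.  */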
4708 /* Once a suitable mem reference has been found and the corresponding data
4709 in MII has been filled in, this function is called to find a suitable
4710 add or inc insn involving the register we found in the memory
4711 reference. */
4713 static bool
4714 find_inc (struct mem_inc_info *mii, bool backwards)
4716 sd_iterator_def sd_it;
4717 dep_t dep;
4719 sd_it = sd_iterator_start (mii->mem_insn,
4720 backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4721 while (sd_iterator_cond (&sd_it, &dep))
4722 {
4723 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
4724 rtx_insn *pro = DEP_PRO (dep);
4725 rtx_insn *con = DEP_CON (dep);
4726 rtx_insn *inc_cand = backwards ? pro : con;
4727 if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4728 goto next;
4729 if (parse_add_or_inc (mii, inc_cand, backwards))
4730 {
4731 struct dep_replacement *desc;
4732 df_ref def;
4733 rtx newaddr, newmem;
4735 if (sched_verbose >= 5)
4736 fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4737 INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4739 /* We need to ensure that none of the operands of the inc
4740 instruction are assigned to by the mem insn. */
4741 FOR_EACH_INSN_DEF (def, mii->mem_insn)
4742 if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4743 || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4744 {
4745 if (sched_verbose >= 5)
4746 fprintf (sched_dump,
4747 "inc conflicts with store failure.\n");
4748 goto next;
4749 }
4751 newaddr = mii->inc_input;
4752 if (mii->mem_index != NULL_RTX)
4753 newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4754 mii->mem_index);
4755 newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4756 mii->mem_constant + mii->inc_constant);
4757 newmem = attempt_change (mii, newaddr);
4758 if (newmem == NULL_RTX)
4759 goto next;
4760 if (sched_verbose >= 5)
4761 fprintf (sched_dump, "successful address replacement\n");
4762 desc = XCNEW (struct dep_replacement);
4763 DEP_REPLACE (dep) = desc;
4764 desc->loc = mii->mem_loc;
4765 desc->newval = newmem;
4766 desc->orig = *desc->loc;
4767 desc->insn = mii->mem_insn;
4768 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4769 INSN_SPEC_BACK_DEPS (con));
4770 if (backwards)
4771 {
4772 FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
4773 add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4774 REG_DEP_TRUE);
4775 }
4776 else
4777 {
4778 FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
4779 add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4780 REG_DEP_ANTI);
4781 }
4782 return true;
4783 }
4784 next:
4785 sd_iterator_next (&sd_it);
4786 }
4787 return false;
4790 /* A recursive function that walks ADDRESS_OF_X to find memory references
4791 which could be modified during scheduling. We call find_inc for each
4792 one we find that has a recognizable form. MII holds information about
4793 the pair of memory/increment instructions.
4794 We ensure that every instruction with a memory reference (which will be
4795 the location of the replacement) is assigned at most one breakable
4796 dependency. */
4798 static bool
4799 find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4801 rtx x = *address_of_x;
4802 enum rtx_code code = GET_CODE (x);
4803 const char *const fmt = GET_RTX_FORMAT (code);
4804 int i;
4806 if (code == MEM)
4807 {
4808 rtx reg0 = XEXP (x, 0);
4810 mii->mem_loc = address_of_x;
4811 mii->mem_index = NULL_RTX;
4812 mii->mem_constant = 0;
4813 if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4814 {
4815 mii->mem_constant = INTVAL (XEXP (reg0, 1));
4816 reg0 = XEXP (reg0, 0);
4817 }
4818 if (GET_CODE (reg0) == PLUS)
4819 {
4820 mii->mem_index = XEXP (reg0, 1);
4821 reg0 = XEXP (reg0, 0);
4822 }
4823 if (REG_P (reg0))
4824 {
4825 df_ref use;
4826 int occurrences = 0;
4828 /* Make sure this reg appears only once in this insn. Can't use
4829 count_occurrences since that only works for pseudos. */
4830 FOR_EACH_INSN_USE (use, mii->mem_insn)
4831 if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
4832 if (++occurrences > 1)
4833 {
4834 if (sched_verbose >= 5)
4835 fprintf (sched_dump, "mem count failure\n");
4836 return false;
4837 }
4839 mii->mem_reg0 = reg0;
4840 return find_inc (mii, true) || find_inc (mii, false);
4841 }
4842 return false;
4843 }
4845 if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4846 {
4847 /* If REG occurs inside a MEM used in a bit-field reference,
4848 that is unacceptable. */
4849 return false;
4850 }
4852 /* Time for some deep diving. */
4853 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4854 {
4855 if (fmt[i] == 'e')
4856 {
4857 if (find_mem (mii, &XEXP (x, i)))
4858 return true;
4859 }
4860 else if (fmt[i] == 'E')
4861 {
4862 int j;
4863 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4864 if (find_mem (mii, &XVECEXP (x, i, j)))
4865 return true;
4866 }
4867 }
4868 return false;
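/* Decomposition examples for the MEM case above (made-up operands):

     (mem (reg 1))                       mem_reg0 r1, no index, constant 0
     (mem (plus (reg 1) (const_int 8)))  mem_reg0 r1, no index, constant 8
     (mem (plus (plus (reg 1) (reg 2))
                (const_int 8)))          mem_reg0 r1, index r2, constant 8  */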
4872 /* Examine the instructions between HEAD and TAIL and try to find
4873 dependencies that can be broken by modifying one of the patterns. */
4875 void
4876 find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
4878 rtx_insn *insn, *next_tail = NEXT_INSN (tail);
4879 int success_in_block = 0;
4881 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4882 {
4883 struct mem_inc_info mii;
4885 if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4886 continue;
4888 mii.mem_insn = insn;
4889 if (find_mem (&mii, &PATTERN (insn)))
4890 success_in_block++;
4891 }
4892 if (success_in_block && sched_verbose >= 5)
4893 fprintf (sched_dump, "%d candidates for address modification found.\n",
4894 success_in_block);
4897 #endif /* INSN_SCHEDULING */