/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
/* The following enumeration values tell us what dependencies we
   should use to implement the barrier.  We use true-dependencies for
   TRUE_BARRIER and anti-dependencies for MOVE_BARRIER.  */
enum reg_pending_barrier_mode
{
  NOT_A_BARRIER = 0,
  MOVE_BARRIER,
  TRUE_BARRIER
};

static enum reg_pending_barrier_mode reg_pending_barrier;
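
/* Illustrative note (added for clarity; the triggering insns below are
   described, not quoted): a volatile asm or an UNSPEC_VOLATILE pattern sets
   reg_pending_barrier to TRUE_BARRIER, so the barrier is expressed with true
   dependencies, while a call carrying a REG_SETJMP note or an insn that can
   throw internally sets MOVE_BARRIER, which is expressed with
   anti-dependencies only.  */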
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches in typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmaps for the true dependency cache are
   allocated, then the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache;
static bitmap_head *output_dependency_cache;
static bitmap_head *anti_dependency_cache;
static bitmap_head *spec_dependency_cache;
static int cache_size;
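
/* Illustrative sketch (added for clarity, not part of the original pass): a
   back dependence of INSN on ELEM is cached as bit INSN_LUID (elem) in the
   bitmap indexed by INSN_LUID (insn), e.g.

     bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
		     INSN_LUID (elem));

   and likewise for the output, anti and speculative caches.  */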
/* To speed up checking the consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   could be switching off checking duplication of insns in forward
   dependencies.  */
#ifdef ENABLE_CHECKING
static bitmap_head *forward_dependency_cache;
#endif
static int deps_may_trap_p (rtx);
static void add_dependence_list (rtx, rtx, int, enum reg_note);
static void add_dependence_list_and_free (rtx, rtx *, int, enum reg_note);
static void delete_all_dependences (rtx);
static void fixup_sched_groups (rtx);

static void flush_pending_lists (struct deps *, rtx, int, int);
static void sched_analyze_1 (struct deps *, rtx, rtx);
static void sched_analyze_2 (struct deps *, rtx, rtx);
static void sched_analyze_insn (struct deps *, rtx, rtx);

static rtx sched_get_condition (rtx);
static int conditions_mutex_p (rtx, rtx);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_back_dep_1 (rtx, rtx,
				enum reg_note, ds_t, rtx, rtx, rtx **);
static enum DEPS_ADJUST_RESULT add_or_update_back_dep_1 (rtx, rtx,
				enum reg_note, ds_t, rtx, rtx, rtx **);
static void add_back_dep (rtx, rtx, enum reg_note, ds_t);

static void adjust_add_sorted_back_dep (rtx, rtx, rtx *);
static void adjust_back_add_forw_dep (rtx, rtx *);
static void delete_forw_dep (rtx, rtx);
static dw_t estimate_dep_weak (rtx, rtx);
#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
static void check_dep_status (enum reg_note, ds_t, bool);
#endif
#endif
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (rtx mem)
{
  rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

rtx
find_insn_list (rtx insn, rtx list)
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
	return list;
      list = XEXP (list, 1);
    }
  return 0;
}
/* Find the condition under which INSN is executed.  */

static rtx
sched_get_condition (rtx insn)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;
      return gen_rtx_fmt_ee (revcode, GET_MODE (cond), XEXP (cond, 0),
			     XEXP (cond, 1));
    }

  return 0;
}
/* Return nonzero if conditions COND1 and COND2 can never both be true.  */

static int
conditions_mutex_p (rtx cond1, rtx cond2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) == reversed_comparison_code (cond2, NULL)
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
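
/* Illustrative example (added for clarity; the register number is made up):
   the COND_EXEC tests (eq (reg 100) (const_int 0)) and
   (ne (reg 100) (const_int 0)) compare the same operands with reversed
   comparison codes, so conditions_mutex_p returns nonzero for them and the
   two guarded insns need no dependence between one another.  */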
/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (rtx insn1, rtx insn2)
{
  rtx cond1, cond2;

  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition (insn1);
      cond2 = sched_get_condition (insn2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
}
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if it is not already there.  DEP_TYPE indicates the
   type of dependence that this link represents.  DS, if nonzero,
   indicates speculations, through which this dependence can be overcome.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.  The function returns a value indicating if an old entry
   has been changed or a new entry has been added to insn's LOG_LINKS.
   If an existing entry was changed, CHANGED_LINKPP is set to its address.
   See also the definition of enum DEPS_ADJUST_RESULT in sched-int.h.
   Actual manipulation of dependence data structures is performed in
   add_or_update_back_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_back_dep_1 (rtx insn, rtx elem, enum reg_note dep_type,
				ds_t ds, rtx mem1, rtx mem2,
				rtx **changed_linkpp)
{
  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
#ifdef INSN_SCHEDULING
      if (current_sched_info->flags & DO_SPECULATION)
	/* INSN has an internal dependence, which we can't overcome.  */
	HAS_INTERNAL_DEP (insn) = 1;
#endif
      return 0;
    }

  return add_or_update_back_dep_1 (insn, elem, dep_type,
				   ds, mem1, mem2, changed_linkpp);
}
/* This function has the same parameters and return values
   as maybe_add_or_update_back_dep_1.  The only difference between these
   two functions is that INSN and ELEM are guaranteed not to be the same
   insn.  */
static enum DEPS_ADJUST_RESULT
add_or_update_back_dep_1 (rtx insn, rtx elem, enum reg_note dep_type,
			  ds_t ds ATTRIBUTE_UNUSED,
			  rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED,
			  rtx **changed_linkpp ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true, present_p = false;

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
  check_dep_status (dep_type, ds, mem1 != NULL);
#endif
281 /* If we already have a dependency for ELEM, then we do not need to
282 do anything. Avoiding the list walk below can cut compile times
283 dramatically for some code. */
284 if (true_dependency_cache
!= NULL
)
286 enum reg_note present_dep_type
;
288 gcc_assert (output_dependency_cache
);
289 gcc_assert (anti_dependency_cache
);
290 if (!(current_sched_info
->flags
& USE_DEPS_LIST
))
292 if (bitmap_bit_p (&true_dependency_cache
[INSN_LUID (insn
)],
294 present_dep_type
= REG_DEP_TRUE
;
295 else if (bitmap_bit_p (&output_dependency_cache
[INSN_LUID (insn
)],
297 present_dep_type
= REG_DEP_OUTPUT
;
298 else if (bitmap_bit_p (&anti_dependency_cache
[INSN_LUID (insn
)],
300 present_dep_type
= REG_DEP_ANTI
;
302 maybe_present_p
= false;
306 if ((int) dep_type
>= (int) present_dep_type
)
314 ds_t present_dep_types
= 0;
316 if (bitmap_bit_p (&true_dependency_cache
[INSN_LUID (insn
)],
318 present_dep_types
|= DEP_TRUE
;
319 if (bitmap_bit_p (&output_dependency_cache
[INSN_LUID (insn
)],
321 present_dep_types
|= DEP_OUTPUT
;
322 if (bitmap_bit_p (&anti_dependency_cache
[INSN_LUID (insn
)],
324 present_dep_types
|= DEP_ANTI
;
326 if (present_dep_types
)
328 if (!(current_sched_info
->flags
& DO_SPECULATION
)
329 || !bitmap_bit_p (&spec_dependency_cache
[INSN_LUID (insn
)],
332 if ((present_dep_types
| (ds
& DEP_TYPES
))
333 == present_dep_types
)
334 /* We already have all these bits. */
339 /* Only true dependencies can be data speculative and
340 only anti dependencies can be control speculative. */
341 gcc_assert ((present_dep_types
& (DEP_TRUE
| DEP_ANTI
))
342 == present_dep_types
);
	      /* If the additional dep is SPECULATIVE, then
		 we should update DEP_STATUS; otherwise
		 we should reset the existing dep to non-speculative.  */
353 maybe_present_p
= false;
358 /* Check that we don't already have this dependence. */
363 for (linkp
= &LOG_LINKS (insn
); *linkp
; linkp
= &XEXP (*linkp
, 1))
367 gcc_assert (true_dependency_cache
== 0 || present_p
);
369 if (XEXP (link
, 0) == elem
)
371 enum DEPS_ADJUST_RESULT changed_p
= DEP_PRESENT
;
373 #ifdef INSN_SCHEDULING
374 if (current_sched_info
->flags
& USE_DEPS_LIST
)
376 ds_t new_status
= ds
| DEP_STATUS (link
);
378 if (new_status
& SPECULATIVE
)
380 if (!(ds
& SPECULATIVE
)
381 || !(DEP_STATUS (link
) & SPECULATIVE
))
382 /* Then this dep can't be speculative. */
384 new_status
&= ~SPECULATIVE
;
385 if (true_dependency_cache
386 && (DEP_STATUS (link
) & SPECULATIVE
))
387 bitmap_clear_bit (&spec_dependency_cache
		  /* Both are speculative.  Merge the probabilities.  */
398 dw
= estimate_dep_weak (mem1
, mem2
);
399 ds
= set_dep_weak (ds
, BEGIN_DATA
, dw
);
402 new_status
= ds_merge (DEP_STATUS (link
), ds
);
	  /* Clear the corresponding cache entry because the type of the link
	     may have changed.  Keep the entries if we use deps lists.  */
411 if (true_dependency_cache
!= NULL
412 && !(current_sched_info
->flags
& USE_DEPS_LIST
))
414 enum reg_note kind
= REG_NOTE_KIND (link
);
419 bitmap_clear_bit (&output_dependency_cache
420 [INSN_LUID (insn
)], INSN_LUID (elem
));
423 bitmap_clear_bit (&anti_dependency_cache
424 [INSN_LUID (insn
)], INSN_LUID (elem
));
431 if ((current_sched_info
->flags
& USE_DEPS_LIST
)
432 && DEP_STATUS (link
) != ds
)
434 DEP_STATUS (link
) = ds
;
435 changed_p
= DEP_CHANGED
;
	  /* If this is a more restrictive type of dependence than the
	     existing one, then change the existing dependence to this
	     type.  */
442 if ((int) dep_type
< (int) REG_NOTE_KIND (link
))
444 PUT_REG_NOTE_KIND (link
, dep_type
);
445 changed_p
= DEP_CHANGED
;
448 #ifdef INSN_SCHEDULING
449 /* If we are adding a dependency to INSN's LOG_LINKs, then
450 note that in the bitmap caches of dependency information. */
451 if (true_dependency_cache
!= NULL
)
453 if (!(current_sched_info
->flags
& USE_DEPS_LIST
))
455 if (REG_NOTE_KIND (link
) == REG_DEP_TRUE
)
456 bitmap_set_bit (&true_dependency_cache
457 [INSN_LUID (insn
)], INSN_LUID (elem
));
458 else if (REG_NOTE_KIND (link
) == REG_DEP_OUTPUT
)
459 bitmap_set_bit (&output_dependency_cache
460 [INSN_LUID (insn
)], INSN_LUID (elem
));
461 else if (REG_NOTE_KIND (link
) == REG_DEP_ANTI
)
462 bitmap_set_bit (&anti_dependency_cache
463 [INSN_LUID (insn
)], INSN_LUID (elem
));
468 bitmap_set_bit (&true_dependency_cache
469 [INSN_LUID (insn
)], INSN_LUID (elem
));
471 bitmap_set_bit (&output_dependency_cache
472 [INSN_LUID (insn
)], INSN_LUID (elem
));
474 bitmap_set_bit (&anti_dependency_cache
475 [INSN_LUID (insn
)], INSN_LUID (elem
));
	      /* Note that a dep can become speculative only
		 at the moment of creation.  Thus, we don't need to
		 check for it here.  */
482 if (changed_linkpp
&& changed_p
== DEP_CHANGED
)
483 *changed_linkpp
= linkp
;
  /* We didn't find a dep, so it shouldn't be present in the cache.  */
489 gcc_assert (!present_p
);
492 /* Might want to check one level of transitivity to save conses.
493 This check should be done in maybe_add_or_update_back_dep_1.
494 Since we made it to add_or_update_back_dep_1, we must create
495 (or update) a link. */
499 gcc_assert (current_sched_info
->flags
& DO_SPECULATION
);
500 ds
= set_dep_weak (ds
, BEGIN_DATA
, estimate_dep_weak (mem1
, mem2
));
503 add_back_dep (insn
, elem
, dep_type
, ds
);
/* This function creates a link between INSN and ELEM under any
   conditions.  DS describes the speculative status of the link.  */
511 add_back_dep (rtx insn
, rtx elem
, enum reg_note dep_type
, ds_t ds
)
513 gcc_assert (INSN_P (insn
) && INSN_P (elem
) && insn
!= elem
);
515 if (current_sched_info
->flags
& USE_DEPS_LIST
)
516 LOG_LINKS (insn
) = alloc_DEPS_LIST (elem
, LOG_LINKS (insn
), ds
);
518 LOG_LINKS (insn
) = alloc_INSN_LIST (elem
, LOG_LINKS (insn
));
520 /* Insn dependency, not data dependency. */
521 PUT_REG_NOTE_KIND (LOG_LINKS (insn
), dep_type
);
523 #ifdef INSN_SCHEDULING
524 #ifdef ENABLE_CHECKING
525 check_dep_status (dep_type
, ds
, false);
528 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
529 in the bitmap caches of dependency information. */
530 if (true_dependency_cache
!= NULL
)
532 if (!(current_sched_info
->flags
& USE_DEPS_LIST
))
534 if (dep_type
== REG_DEP_TRUE
)
535 bitmap_set_bit (&true_dependency_cache
[INSN_LUID (insn
)],
537 else if (dep_type
== REG_DEP_OUTPUT
)
538 bitmap_set_bit (&output_dependency_cache
[INSN_LUID (insn
)],
540 else if (dep_type
== REG_DEP_ANTI
)
541 bitmap_set_bit (&anti_dependency_cache
[INSN_LUID (insn
)],
547 bitmap_set_bit (&true_dependency_cache
[INSN_LUID (insn
)],
550 bitmap_set_bit (&output_dependency_cache
[INSN_LUID (insn
)],
553 bitmap_set_bit (&anti_dependency_cache
[INSN_LUID (insn
)],
555 if (ds
& SPECULATIVE
)
557 gcc_assert (current_sched_info
->flags
& DO_SPECULATION
);
558 bitmap_set_bit (&spec_dependency_cache
[INSN_LUID (insn
)],
566 /* A convenience wrapper to operate on an entire list. */
569 add_dependence_list (rtx insn
, rtx list
, int uncond
, enum reg_note dep_type
)
571 for (; list
; list
= XEXP (list
, 1))
573 if (uncond
|| ! sched_insns_conditions_mutex_p (insn
, XEXP (list
, 0)))
574 add_dependence (insn
, XEXP (list
, 0), dep_type
);
578 /* Similar, but free *LISTP at the same time. */
581 add_dependence_list_and_free (rtx insn
, rtx
*listp
, int uncond
,
582 enum reg_note dep_type
)
585 for (list
= *listp
, *listp
= NULL
; list
; list
= next
)
587 next
= XEXP (list
, 1);
588 if (uncond
|| ! sched_insns_conditions_mutex_p (insn
, XEXP (list
, 0)))
589 add_dependence (insn
, XEXP (list
, 0), dep_type
);
590 free_INSN_LIST_node (list
);
594 /* Clear all dependencies for an insn. */
597 delete_all_dependences (rtx insn
)
599 /* Clear caches, if they exist, as well as free the dependence. */
601 #ifdef INSN_SCHEDULING
602 if (true_dependency_cache
!= NULL
)
604 bitmap_clear (&true_dependency_cache
[INSN_LUID (insn
)]);
605 bitmap_clear (&output_dependency_cache
[INSN_LUID (insn
)]);
606 bitmap_clear (&anti_dependency_cache
[INSN_LUID (insn
)]);
607 /* We don't have to clear forward_dependency_cache here,
608 because it is formed later. */
609 if (current_sched_info
->flags
& DO_SPECULATION
)
610 bitmap_clear (&spec_dependency_cache
[INSN_LUID (insn
)]);
614 if (!(current_sched_info
->flags
& USE_DEPS_LIST
))
615 /* In this case LOG_LINKS are formed from the DEPS_LISTs,
616 not the INSN_LISTs. */
617 free_INSN_LIST_list (&LOG_LINKS (insn
));
619 free_DEPS_LIST_list (&LOG_LINKS (insn
));
622 /* All insns in a scheduling group except the first should only have
623 dependencies on the previous insn in the group. So we find the
624 first instruction in the scheduling group by walking the dependence
625 chains backwards. Then we add the dependencies for the group to
626 the previous nonnote insn. */
629 fixup_sched_groups (rtx insn
)
631 rtx link
, prev_nonnote
;
633 for (link
= LOG_LINKS (insn
); link
; link
= XEXP (link
, 1))
638 i
= prev_nonnote_insn (i
);
640 if (XEXP (link
, 0) == i
)
642 } while (SCHED_GROUP_P (i
));
643 if (! sched_insns_conditions_mutex_p (i
, XEXP (link
, 0)))
644 add_dependence (i
, XEXP (link
, 0), REG_NOTE_KIND (link
));
648 delete_all_dependences (insn
);
650 prev_nonnote
= prev_nonnote_insn (insn
);
651 if (BLOCK_FOR_INSN (insn
) == BLOCK_FOR_INSN (prev_nonnote
)
652 && ! sched_insns_conditions_mutex_p (insn
, prev_nonnote
))
653 add_dependence (insn
, prev_nonnote
, REG_DEP_ANTI
);
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
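
/* Worked example (added for clarity; the pseudo registers are hypothetical,
   not taken from any real insn stream):

     insn A:  (set (mem:SI (reg 100)) (reg 101))	;; store
     insn B:  (set (reg 102) (mem:SI (reg 100)))	;; load
     insn C:  (set (mem:SI (reg 100)) (reg 103))	;; store

   B reading what A wrote is a true dependence (1), C writing over what A
   wrote is an output dependence (2), and C writing after B's read is an
   anti dependence (3); two loads from the same location would only form a
   read dependence (0), which imposes no ordering by itself.  */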
667 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
668 The MEM is a memory reference contained within INSN, which we are saving
669 so that we can do memory aliasing on it. */
672 add_insn_mem_dependence (struct deps
*deps
, rtx
*insn_list
, rtx
*mem_list
,
677 link
= alloc_INSN_LIST (insn
, *insn_list
);
680 if (current_sched_info
->use_cselib
)
682 mem
= shallow_copy_rtx (mem
);
683 XEXP (mem
, 0) = cselib_subst_to_values (XEXP (mem
, 0));
685 link
= alloc_EXPR_LIST (VOIDmode
, canon_rtx (mem
), *mem_list
);
688 deps
->pending_lists_length
++;
691 /* Make a dependency between every memory reference on the pending lists
692 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
693 dependencies for a read operation, similarly with FOR_WRITE. */
696 flush_pending_lists (struct deps
*deps
, rtx insn
, int for_read
,
701 add_dependence_list_and_free (insn
, &deps
->pending_read_insns
, 1,
703 free_EXPR_LIST_list (&deps
->pending_read_mems
);
706 add_dependence_list_and_free (insn
, &deps
->pending_write_insns
, 1,
707 for_read
? REG_DEP_ANTI
: REG_DEP_OUTPUT
);
708 free_EXPR_LIST_list (&deps
->pending_write_mems
);
709 deps
->pending_lists_length
= 0;
711 add_dependence_list_and_free (insn
, &deps
->last_pending_memory_flush
, 1,
712 for_read
? REG_DEP_ANTI
: REG_DEP_OUTPUT
);
713 deps
->last_pending_memory_flush
= alloc_INSN_LIST (insn
, NULL_RTX
);
714 deps
->pending_flush_length
= 1;
717 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
718 The type of the reference is specified by REF and can be SET,
719 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
722 sched_analyze_reg (struct deps
*deps
, int regno
, enum machine_mode mode
,
723 enum rtx_code ref
, rtx insn
)
725 /* A hard reg in a wide mode may really be multiple registers.
726 If so, mark all of them just like the first. */
727 if (regno
< FIRST_PSEUDO_REGISTER
)
729 int i
= hard_regno_nregs
[regno
][mode
];
733 SET_REGNO_REG_SET (reg_pending_sets
, regno
+ i
);
738 SET_REGNO_REG_SET (reg_pending_uses
, regno
+ i
);
743 SET_REGNO_REG_SET (reg_pending_clobbers
, regno
+ i
);
  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose.  */
750 else if (regno
>= deps
->max_reg
)
752 enum rtx_code code
= GET_CODE (PATTERN (insn
));
753 gcc_assert (code
== USE
|| code
== CLOBBER
);
759 SET_REGNO_REG_SET (reg_pending_sets
, regno
);
761 SET_REGNO_REG_SET (reg_pending_uses
, regno
);
763 SET_REGNO_REG_SET (reg_pending_clobbers
, regno
);
765 /* Pseudos that are REG_EQUIV to something may be replaced
766 by that during reloading. We need only add dependencies for
767 the address in the REG_EQUIV note. */
768 if (!reload_completed
&& get_reg_known_equiv_p (regno
))
770 rtx t
= get_reg_known_value (regno
);
772 sched_analyze_2 (deps
, XEXP (t
, 0), insn
);
775 /* Don't let it cross a call after scheduling if it doesn't
776 already cross one. */
777 if (REG_N_CALLS_CROSSED (regno
) == 0)
780 deps
->sched_before_next_call
781 = alloc_INSN_LIST (insn
, deps
->sched_before_next_call
);
783 add_dependence_list (insn
, deps
->last_function_call
, 1,
789 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
790 rtx, X, creating all dependencies generated by the write to the
791 destination of X, and reads of everything mentioned. */
794 sched_analyze_1 (struct deps
*deps
, rtx x
, rtx insn
)
796 rtx dest
= XEXP (x
, 0);
797 enum rtx_code code
= GET_CODE (x
);
802 if (GET_CODE (dest
) == PARALLEL
)
806 for (i
= XVECLEN (dest
, 0) - 1; i
>= 0; i
--)
807 if (XEXP (XVECEXP (dest
, 0, i
), 0) != 0)
808 sched_analyze_1 (deps
,
809 gen_rtx_CLOBBER (VOIDmode
,
810 XEXP (XVECEXP (dest
, 0, i
), 0)),
813 if (GET_CODE (x
) == SET
)
814 sched_analyze_2 (deps
, SET_SRC (x
), insn
);
818 while (GET_CODE (dest
) == STRICT_LOW_PART
|| GET_CODE (dest
) == SUBREG
819 || GET_CODE (dest
) == ZERO_EXTRACT
)
821 if (GET_CODE (dest
) == STRICT_LOW_PART
822 || GET_CODE (dest
) == ZERO_EXTRACT
823 || df_read_modify_subreg_p (dest
))
825 /* These both read and modify the result. We must handle
826 them as writes to get proper dependencies for following
827 instructions. We must handle them as reads to get proper
828 dependencies from this to previous instructions.
829 Thus we need to call sched_analyze_2. */
831 sched_analyze_2 (deps
, XEXP (dest
, 0), insn
);
833 if (GET_CODE (dest
) == ZERO_EXTRACT
)
835 /* The second and third arguments are values read by this insn. */
836 sched_analyze_2 (deps
, XEXP (dest
, 1), insn
);
837 sched_analyze_2 (deps
, XEXP (dest
, 2), insn
);
839 dest
= XEXP (dest
, 0);
844 int regno
= REGNO (dest
);
845 enum machine_mode mode
= GET_MODE (dest
);
847 sched_analyze_reg (deps
, regno
, mode
, code
, insn
);
850 /* Treat all writes to a stack register as modifying the TOS. */
851 if (regno
>= FIRST_STACK_REG
&& regno
<= LAST_STACK_REG
)
853 /* Avoid analyzing the same register twice. */
854 if (regno
!= FIRST_STACK_REG
)
855 sched_analyze_reg (deps
, FIRST_STACK_REG
, mode
, code
, insn
);
856 sched_analyze_reg (deps
, FIRST_STACK_REG
, mode
, USE
, insn
);
860 else if (MEM_P (dest
))
862 /* Writing memory. */
865 if (current_sched_info
->use_cselib
)
867 t
= shallow_copy_rtx (dest
);
868 cselib_lookup (XEXP (t
, 0), Pmode
, 1);
869 XEXP (t
, 0) = cselib_subst_to_values (XEXP (t
, 0));
873 if (deps
->pending_lists_length
> MAX_PENDING_LIST_LENGTH
)
875 /* Flush all pending reads and writes to prevent the pending lists
876 from getting any larger. Insn scheduling runs too slowly when
877 these lists get long. When compiling GCC with itself,
878 this flush occurs 8 times for sparc, and 10 times for m88k using
879 the default value of 32. */
880 flush_pending_lists (deps
, insn
, false, true);
884 rtx pending
, pending_mem
;
886 pending
= deps
->pending_read_insns
;
887 pending_mem
= deps
->pending_read_mems
;
890 if (anti_dependence (XEXP (pending_mem
, 0), t
)
891 && ! sched_insns_conditions_mutex_p (insn
, XEXP (pending
, 0)))
892 add_dependence (insn
, XEXP (pending
, 0), REG_DEP_ANTI
);
894 pending
= XEXP (pending
, 1);
895 pending_mem
= XEXP (pending_mem
, 1);
898 pending
= deps
->pending_write_insns
;
899 pending_mem
= deps
->pending_write_mems
;
902 if (output_dependence (XEXP (pending_mem
, 0), t
)
903 && ! sched_insns_conditions_mutex_p (insn
, XEXP (pending
, 0)))
904 add_dependence (insn
, XEXP (pending
, 0), REG_DEP_OUTPUT
);
906 pending
= XEXP (pending
, 1);
907 pending_mem
= XEXP (pending_mem
, 1);
910 add_dependence_list (insn
, deps
->last_pending_memory_flush
, 1,
913 add_insn_mem_dependence (deps
, &deps
->pending_write_insns
,
914 &deps
->pending_write_mems
, insn
, dest
);
916 sched_analyze_2 (deps
, XEXP (dest
, 0), insn
);
920 if (GET_CODE (x
) == SET
)
921 sched_analyze_2 (deps
, SET_SRC (x
), insn
);
924 /* Analyze the uses of memory and registers in rtx X in INSN. */
927 sched_analyze_2 (struct deps
*deps
, rtx x
, rtx insn
)
947 /* Ignore constants. Note that we must handle CONST_DOUBLE here
948 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
949 this does not mean that this insn is using cc0. */
954 /* User of CC0 depends on immediately preceding insn. */
955 SCHED_GROUP_P (insn
) = 1;
956 /* Don't move CC0 setter to another block (it can set up the
957 same flag for previous CC0 users which is safe). */
958 CANT_MOVE (prev_nonnote_insn (insn
)) = 1;
964 int regno
= REGNO (x
);
965 enum machine_mode mode
= GET_MODE (x
);
967 sched_analyze_reg (deps
, regno
, mode
, USE
, insn
);
970 /* Treat all reads of a stack register as modifying the TOS. */
971 if (regno
>= FIRST_STACK_REG
&& regno
<= LAST_STACK_REG
)
973 /* Avoid analyzing the same register twice. */
974 if (regno
!= FIRST_STACK_REG
)
975 sched_analyze_reg (deps
, FIRST_STACK_REG
, mode
, USE
, insn
);
976 sched_analyze_reg (deps
, FIRST_STACK_REG
, mode
, SET
, insn
);
984 /* Reading memory. */
986 rtx pending
, pending_mem
;
989 if (current_sched_info
->use_cselib
)
991 t
= shallow_copy_rtx (t
);
992 cselib_lookup (XEXP (t
, 0), Pmode
, 1);
993 XEXP (t
, 0) = cselib_subst_to_values (XEXP (t
, 0));
996 pending
= deps
->pending_read_insns
;
997 pending_mem
= deps
->pending_read_mems
;
1000 if (read_dependence (XEXP (pending_mem
, 0), t
)
1001 && ! sched_insns_conditions_mutex_p (insn
, XEXP (pending
, 0)))
1002 add_dependence (insn
, XEXP (pending
, 0), REG_DEP_ANTI
);
1004 pending
= XEXP (pending
, 1);
1005 pending_mem
= XEXP (pending_mem
, 1);
1008 pending
= deps
->pending_write_insns
;
1009 pending_mem
= deps
->pending_write_mems
;
1012 if (true_dependence (XEXP (pending_mem
, 0), VOIDmode
,
1014 && ! sched_insns_conditions_mutex_p (insn
, XEXP (pending
, 0)))
1016 if (current_sched_info
->flags
& DO_SPECULATION
)
1017 maybe_add_or_update_back_dep_1 (insn
, XEXP (pending
, 0),
1019 BEGIN_DATA
| DEP_TRUE
,
1020 XEXP (pending_mem
, 0), t
, 0);
1022 add_dependence (insn
, XEXP (pending
, 0), REG_DEP_TRUE
);
1025 pending
= XEXP (pending
, 1);
1026 pending_mem
= XEXP (pending_mem
, 1);
1029 for (u
= deps
->last_pending_memory_flush
; u
; u
= XEXP (u
, 1))
1030 if (! JUMP_P (XEXP (u
, 0)) || deps_may_trap_p (x
))
1031 add_dependence (insn
, XEXP (u
, 0), REG_DEP_ANTI
);
1033 /* Always add these dependencies to pending_reads, since
1034 this insn may be followed by a write. */
1035 add_insn_mem_dependence (deps
, &deps
->pending_read_insns
,
1036 &deps
->pending_read_mems
, insn
, x
);
1038 /* Take advantage of tail recursion here. */
1039 sched_analyze_2 (deps
, XEXP (x
, 0), insn
);
1043 /* Force pending stores to memory in case a trap handler needs them. */
1045 flush_pending_lists (deps
, insn
, true, false);
1050 case UNSPEC_VOLATILE
:
1052 /* Traditional and volatile asm instructions must be considered to use
1053 and clobber all hard registers, all pseudo-registers and all of
1054 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
1056 Consider for instance a volatile asm that changes the fpu rounding
1057 mode. An insn should not be moved across this even if it only uses
1058 pseudo-regs because it might give an incorrectly rounded result. */
1059 if (code
!= ASM_OPERANDS
|| MEM_VOLATILE_P (x
))
1060 reg_pending_barrier
= TRUE_BARRIER
;
      /* For all ASM_OPERANDS, we must traverse the vector of input operands.
	 We cannot just fall through here since then we would be confused
	 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
	 a traditional asm, unlike its normal usage.  */
1067 if (code
== ASM_OPERANDS
)
1069 for (j
= 0; j
< ASM_OPERANDS_INPUT_LENGTH (x
); j
++)
1070 sched_analyze_2 (deps
, ASM_OPERANDS_INPUT (x
, j
), insn
);
1080 /* These both read and modify the result. We must handle them as writes
1081 to get proper dependencies for following instructions. We must handle
1082 them as reads to get proper dependencies from this to previous
1083 instructions. Thus we need to pass them to both sched_analyze_1
1084 and sched_analyze_2. We must call sched_analyze_2 first in order
1085 to get the proper antecedent for the read. */
1086 sched_analyze_2 (deps
, XEXP (x
, 0), insn
);
1087 sched_analyze_1 (deps
, x
, insn
);
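      /* Illustrative note (added for clarity; the register is hypothetical):
	 for an address like (post_inc (reg 100)) the insn both reads reg 100
	 (to form the address) and writes it (the increment), so the rtx is
	 passed to sched_analyze_2 first, to pick up dependencies on earlier
	 setters of reg 100, and then to sched_analyze_1, so that later users
	 of reg 100 depend on this insn.  */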
1092 /* op0 = op0 + op1 */
1093 sched_analyze_2 (deps
, XEXP (x
, 0), insn
);
1094 sched_analyze_2 (deps
, XEXP (x
, 1), insn
);
1095 sched_analyze_1 (deps
, x
, insn
);
1102 /* Other cases: walk the insn. */
1103 fmt
= GET_RTX_FORMAT (code
);
1104 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1107 sched_analyze_2 (deps
, XEXP (x
, i
), insn
);
1108 else if (fmt
[i
] == 'E')
1109 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
1110 sched_analyze_2 (deps
, XVECEXP (x
, i
, j
), insn
);
1114 /* Analyze an INSN with pattern X to find all dependencies. */
1117 sched_analyze_insn (struct deps
*deps
, rtx x
, rtx insn
)
1119 RTX_CODE code
= GET_CODE (x
);
1122 reg_set_iterator rsi
;
1124 if (code
== COND_EXEC
)
1126 sched_analyze_2 (deps
, COND_EXEC_TEST (x
), insn
);
1128 /* ??? Should be recording conditions so we reduce the number of
1129 false dependencies. */
1130 x
= COND_EXEC_CODE (x
);
1131 code
= GET_CODE (x
);
1133 if (code
== SET
|| code
== CLOBBER
)
1135 sched_analyze_1 (deps
, x
, insn
);
1137 /* Bare clobber insns are used for letting life analysis, reg-stack
1138 and others know that a value is dead. Depend on the last call
1139 instruction so that reg-stack won't get confused. */
1140 if (code
== CLOBBER
)
1141 add_dependence_list (insn
, deps
->last_function_call
, 1, REG_DEP_OUTPUT
);
1143 else if (code
== PARALLEL
)
1145 for (i
= XVECLEN (x
, 0); i
--;)
1147 rtx sub
= XVECEXP (x
, 0, i
);
1148 code
= GET_CODE (sub
);
1150 if (code
== COND_EXEC
)
1152 sched_analyze_2 (deps
, COND_EXEC_TEST (sub
), insn
);
1153 sub
= COND_EXEC_CODE (sub
);
1154 code
= GET_CODE (sub
);
1156 if (code
== SET
|| code
== CLOBBER
)
1157 sched_analyze_1 (deps
, sub
, insn
);
1159 sched_analyze_2 (deps
, sub
, insn
);
1163 sched_analyze_2 (deps
, x
, insn
);
1165 /* Mark registers CLOBBERED or used by called function. */
1168 for (link
= CALL_INSN_FUNCTION_USAGE (insn
); link
; link
= XEXP (link
, 1))
1170 if (GET_CODE (XEXP (link
, 0)) == CLOBBER
)
1171 sched_analyze_1 (deps
, XEXP (link
, 0), insn
);
1173 sched_analyze_2 (deps
, XEXP (link
, 0), insn
);
1175 if (find_reg_note (insn
, REG_SETJMP
, NULL
))
1176 reg_pending_barrier
= MOVE_BARRIER
;
1182 next
= next_nonnote_insn (insn
);
1183 if (next
&& BARRIER_P (next
))
1184 reg_pending_barrier
= TRUE_BARRIER
;
1187 rtx pending
, pending_mem
;
1188 regset_head tmp_uses
, tmp_sets
;
1189 INIT_REG_SET (&tmp_uses
);
1190 INIT_REG_SET (&tmp_sets
);
1192 (*current_sched_info
->compute_jump_reg_dependencies
)
1193 (insn
, &deps
->reg_conditional_sets
, &tmp_uses
, &tmp_sets
);
1194 /* Make latency of jump equal to 0 by using anti-dependence. */
1195 EXECUTE_IF_SET_IN_REG_SET (&tmp_uses
, 0, i
, rsi
)
1197 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1198 add_dependence_list (insn
, reg_last
->sets
, 0, REG_DEP_ANTI
);
1199 add_dependence_list (insn
, reg_last
->clobbers
, 0, REG_DEP_ANTI
);
1200 reg_last
->uses_length
++;
1201 reg_last
->uses
= alloc_INSN_LIST (insn
, reg_last
->uses
);
1203 IOR_REG_SET (reg_pending_sets
, &tmp_sets
);
1205 CLEAR_REG_SET (&tmp_uses
);
1206 CLEAR_REG_SET (&tmp_sets
);
      /* All memory writes and volatile reads must happen before the
	 jump.  Non-volatile reads must happen before the jump iff
	 the result is needed by the register-use mask computed above.  */
1212 pending
= deps
->pending_write_insns
;
1213 pending_mem
= deps
->pending_write_mems
;
1216 if (! sched_insns_conditions_mutex_p (insn
, XEXP (pending
, 0)))
1217 add_dependence (insn
, XEXP (pending
, 0), REG_DEP_OUTPUT
);
1218 pending
= XEXP (pending
, 1);
1219 pending_mem
= XEXP (pending_mem
, 1);
1222 pending
= deps
->pending_read_insns
;
1223 pending_mem
= deps
->pending_read_mems
;
1226 if (MEM_VOLATILE_P (XEXP (pending_mem
, 0))
1227 && ! sched_insns_conditions_mutex_p (insn
, XEXP (pending
, 0)))
1228 add_dependence (insn
, XEXP (pending
, 0), REG_DEP_OUTPUT
);
1229 pending
= XEXP (pending
, 1);
1230 pending_mem
= XEXP (pending_mem
, 1);
1233 add_dependence_list (insn
, deps
->last_pending_memory_flush
, 1,
1238 /* If this instruction can throw an exception, then moving it changes
1239 where block boundaries fall. This is mighty confusing elsewhere.
1240 Therefore, prevent such an instruction from being moved. Same for
1241 non-jump instructions that define block boundaries.
1242 ??? Unclear whether this is still necessary in EBB mode. If not,
1243 add_branch_dependences should be adjusted for RGN mode instead. */
1244 if (((CALL_P (insn
) || JUMP_P (insn
)) && can_throw_internal (insn
))
1245 || (NONJUMP_INSN_P (insn
) && control_flow_insn_p (insn
)))
1246 reg_pending_barrier
= MOVE_BARRIER
;
1248 /* Add dependencies if a scheduling barrier was found. */
1249 if (reg_pending_barrier
)
      /* In the case of a barrier, most of the added dependencies are not
	 real, so we use anti-dependences here.  */
1253 if (sched_get_condition (insn
))
1255 EXECUTE_IF_SET_IN_REG_SET (&deps
->reg_last_in_use
, 0, i
, rsi
)
1257 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1258 add_dependence_list (insn
, reg_last
->uses
, 0, REG_DEP_ANTI
);
1260 (insn
, reg_last
->sets
, 0,
1261 reg_pending_barrier
== TRUE_BARRIER
? REG_DEP_TRUE
: REG_DEP_ANTI
);
1263 (insn
, reg_last
->clobbers
, 0,
1264 reg_pending_barrier
== TRUE_BARRIER
? REG_DEP_TRUE
: REG_DEP_ANTI
);
1269 EXECUTE_IF_SET_IN_REG_SET (&deps
->reg_last_in_use
, 0, i
, rsi
)
1271 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1272 add_dependence_list_and_free (insn
, ®_last
->uses
, 0,
1274 add_dependence_list_and_free
1275 (insn
, ®_last
->sets
, 0,
1276 reg_pending_barrier
== TRUE_BARRIER
? REG_DEP_TRUE
: REG_DEP_ANTI
);
1277 add_dependence_list_and_free
1278 (insn
, ®_last
->clobbers
, 0,
1279 reg_pending_barrier
== TRUE_BARRIER
? REG_DEP_TRUE
: REG_DEP_ANTI
);
1280 reg_last
->uses_length
= 0;
1281 reg_last
->clobbers_length
= 0;
1285 for (i
= 0; i
< (unsigned)deps
->max_reg
; i
++)
1287 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1288 reg_last
->sets
= alloc_INSN_LIST (insn
, reg_last
->sets
);
1289 SET_REGNO_REG_SET (&deps
->reg_last_in_use
, i
);
1292 flush_pending_lists (deps
, insn
, true, true);
1293 CLEAR_REG_SET (&deps
->reg_conditional_sets
);
1294 reg_pending_barrier
= NOT_A_BARRIER
;
      /* If the current insn is conditional, we can't free any
	 of the lists.  */
      if (sched_get_condition (insn))
1302 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses
, 0, i
, rsi
)
1304 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1305 add_dependence_list (insn
, reg_last
->sets
, 0, REG_DEP_TRUE
);
1306 add_dependence_list (insn
, reg_last
->clobbers
, 0, REG_DEP_TRUE
);
1307 reg_last
->uses
= alloc_INSN_LIST (insn
, reg_last
->uses
);
1308 reg_last
->uses_length
++;
1310 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers
, 0, i
, rsi
)
1312 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1313 add_dependence_list (insn
, reg_last
->sets
, 0, REG_DEP_OUTPUT
);
1314 add_dependence_list (insn
, reg_last
->uses
, 0, REG_DEP_ANTI
);
1315 reg_last
->clobbers
= alloc_INSN_LIST (insn
, reg_last
->clobbers
);
1316 reg_last
->clobbers_length
++;
1318 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets
, 0, i
, rsi
)
1320 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1321 add_dependence_list (insn
, reg_last
->sets
, 0, REG_DEP_OUTPUT
);
1322 add_dependence_list (insn
, reg_last
->clobbers
, 0, REG_DEP_OUTPUT
);
1323 add_dependence_list (insn
, reg_last
->uses
, 0, REG_DEP_ANTI
);
1324 reg_last
->sets
= alloc_INSN_LIST (insn
, reg_last
->sets
);
1325 SET_REGNO_REG_SET (&deps
->reg_conditional_sets
, i
);
1330 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses
, 0, i
, rsi
)
1332 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1333 add_dependence_list (insn
, reg_last
->sets
, 0, REG_DEP_TRUE
);
1334 add_dependence_list (insn
, reg_last
->clobbers
, 0, REG_DEP_TRUE
);
1335 reg_last
->uses_length
++;
1336 reg_last
->uses
= alloc_INSN_LIST (insn
, reg_last
->uses
);
1338 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers
, 0, i
, rsi
)
1340 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1341 if (reg_last
->uses_length
> MAX_PENDING_LIST_LENGTH
1342 || reg_last
->clobbers_length
> MAX_PENDING_LIST_LENGTH
)
1344 add_dependence_list_and_free (insn
, ®_last
->sets
, 0,
1346 add_dependence_list_and_free (insn
, ®_last
->uses
, 0,
1348 add_dependence_list_and_free (insn
, ®_last
->clobbers
, 0,
1350 reg_last
->sets
= alloc_INSN_LIST (insn
, reg_last
->sets
);
1351 reg_last
->clobbers_length
= 0;
1352 reg_last
->uses_length
= 0;
1356 add_dependence_list (insn
, reg_last
->sets
, 0, REG_DEP_OUTPUT
);
1357 add_dependence_list (insn
, reg_last
->uses
, 0, REG_DEP_ANTI
);
1359 reg_last
->clobbers_length
++;
1360 reg_last
->clobbers
= alloc_INSN_LIST (insn
, reg_last
->clobbers
);
1362 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets
, 0, i
, rsi
)
1364 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1365 add_dependence_list_and_free (insn
, ®_last
->sets
, 0,
1367 add_dependence_list_and_free (insn
, ®_last
->clobbers
, 0,
1369 add_dependence_list_and_free (insn
, ®_last
->uses
, 0,
1371 reg_last
->sets
= alloc_INSN_LIST (insn
, reg_last
->sets
);
1372 reg_last
->uses_length
= 0;
1373 reg_last
->clobbers_length
= 0;
1374 CLEAR_REGNO_REG_SET (&deps
->reg_conditional_sets
, i
);
1378 IOR_REG_SET (&deps
->reg_last_in_use
, reg_pending_uses
);
1379 IOR_REG_SET (&deps
->reg_last_in_use
, reg_pending_clobbers
);
1380 IOR_REG_SET (&deps
->reg_last_in_use
, reg_pending_sets
);
1382 CLEAR_REG_SET (reg_pending_uses
);
1383 CLEAR_REG_SET (reg_pending_clobbers
);
1384 CLEAR_REG_SET (reg_pending_sets
);
  /* If we are currently in a libcall scheduling group, then mark the
     current insn as being in a scheduling group and that it cannot
     be moved into a different basic block.  */
1390 if (deps
->libcall_block_tail_insn
)
1392 SCHED_GROUP_P (insn
) = 1;
1393 CANT_MOVE (insn
) = 1;
1396 /* If a post-call group is still open, see if it should remain so.
1397 This insn must be a simple move of a hard reg to a pseudo or
1400 We must avoid moving these insns for correctness on
1401 SMALL_REGISTER_CLASS machines, and for special registers like
1402 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
1403 hard regs for all targets. */
1405 if (deps
->in_post_call_group_p
)
1407 rtx tmp
, set
= single_set (insn
);
1408 int src_regno
, dest_regno
;
1411 goto end_call_group
;
1413 tmp
= SET_DEST (set
);
1414 if (GET_CODE (tmp
) == SUBREG
)
1415 tmp
= SUBREG_REG (tmp
);
1417 dest_regno
= REGNO (tmp
);
1419 goto end_call_group
;
1421 tmp
= SET_SRC (set
);
1422 if (GET_CODE (tmp
) == SUBREG
)
1423 tmp
= SUBREG_REG (tmp
);
1424 if ((GET_CODE (tmp
) == PLUS
1425 || GET_CODE (tmp
) == MINUS
)
1426 && REG_P (XEXP (tmp
, 0))
1427 && REGNO (XEXP (tmp
, 0)) == STACK_POINTER_REGNUM
1428 && dest_regno
== STACK_POINTER_REGNUM
)
1429 src_regno
= STACK_POINTER_REGNUM
;
1430 else if (REG_P (tmp
))
1431 src_regno
= REGNO (tmp
);
1433 goto end_call_group
;
1435 if (src_regno
< FIRST_PSEUDO_REGISTER
1436 || dest_regno
< FIRST_PSEUDO_REGISTER
)
1438 if (deps
->in_post_call_group_p
== post_call_initial
)
1439 deps
->in_post_call_group_p
= post_call
;
1441 SCHED_GROUP_P (insn
) = 1;
1442 CANT_MOVE (insn
) = 1;
1447 deps
->in_post_call_group_p
= not_post_call
;
1451 /* Fixup the dependencies in the sched group. */
1452 if (SCHED_GROUP_P (insn
))
1453 fixup_sched_groups (insn
);
1456 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1457 for every dependency. */
1460 sched_analyze (struct deps
*deps
, rtx head
, rtx tail
)
1464 if (current_sched_info
->use_cselib
)
1467 /* Before reload, if the previous block ended in a call, show that
1468 we are inside a post-call group, so as to keep the lifetimes of
1469 hard registers correct. */
1470 if (! reload_completed
&& !LABEL_P (head
))
1472 insn
= prev_nonnote_insn (head
);
1473 if (insn
&& CALL_P (insn
))
1474 deps
->in_post_call_group_p
= post_call_initial
;
1476 for (insn
= head
;; insn
= NEXT_INSN (insn
))
1478 rtx link
, end_seq
, r0
, set
;
1480 if (NONJUMP_INSN_P (insn
) || JUMP_P (insn
))
1482 /* Clear out the stale LOG_LINKS from flow. */
1483 free_INSN_LIST_list (&LOG_LINKS (insn
));
	  /* Make each JUMP_INSN a scheduling barrier for memory
	     references.  */
1489 /* Keep the list a reasonable size. */
1490 if (deps
->pending_flush_length
++ > MAX_PENDING_LIST_LENGTH
)
1491 flush_pending_lists (deps
, insn
, true, true);
1493 deps
->last_pending_memory_flush
1494 = alloc_INSN_LIST (insn
, deps
->last_pending_memory_flush
);
1496 sched_analyze_insn (deps
, PATTERN (insn
), insn
);
1498 else if (CALL_P (insn
))
1502 CANT_MOVE (insn
) = 1;
1504 /* Clear out the stale LOG_LINKS from flow. */
1505 free_INSN_LIST_list (&LOG_LINKS (insn
));
1507 if (find_reg_note (insn
, REG_SETJMP
, NULL
))
1509 /* This is setjmp. Assume that all registers, not just
1510 hard registers, may be clobbered by this call. */
1511 reg_pending_barrier
= MOVE_BARRIER
;
1515 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
1516 /* A call may read and modify global register variables. */
1519 SET_REGNO_REG_SET (reg_pending_sets
, i
);
1520 SET_REGNO_REG_SET (reg_pending_uses
, i
);
1522 /* Other call-clobbered hard regs may be clobbered.
1523 Since we only have a choice between 'might be clobbered'
1524 and 'definitely not clobbered', we must include all
1525 partly call-clobbered registers here. */
1526 else if (HARD_REGNO_CALL_PART_CLOBBERED (i
, reg_raw_mode
[i
])
1527 || TEST_HARD_REG_BIT (regs_invalidated_by_call
, i
))
1528 SET_REGNO_REG_SET (reg_pending_clobbers
, i
);
	      /* We don't know what set of fixed registers might be used
		 by the function, but it is certain that the stack pointer
		 is among them, so be conservative.  */
1532 else if (fixed_regs
[i
])
1533 SET_REGNO_REG_SET (reg_pending_uses
, i
);
1534 /* The frame pointer is normally not used by the function
1535 itself, but by the debugger. */
1536 /* ??? MIPS o32 is an exception. It uses the frame pointer
1537 in the macro expansion of jal but does not represent this
1538 fact in the call_insn rtl. */
1539 else if (i
== FRAME_POINTER_REGNUM
1540 || (i
== HARD_FRAME_POINTER_REGNUM
1541 && (! reload_completed
|| frame_pointer_needed
)))
1542 SET_REGNO_REG_SET (reg_pending_uses
, i
);
1545 /* For each insn which shouldn't cross a call, add a dependence
1546 between that insn and this call insn. */
1547 add_dependence_list_and_free (insn
, &deps
->sched_before_next_call
, 1,
1550 sched_analyze_insn (deps
, PATTERN (insn
), insn
);
1552 /* In the absence of interprocedural alias analysis, we must flush
1553 all pending reads and writes, and start new dependencies starting
1554 from here. But only flush writes for constant calls (which may
1555 be passed a pointer to something we haven't written yet). */
1556 flush_pending_lists (deps
, insn
, true, !CONST_OR_PURE_CALL_P (insn
));
1558 /* Remember the last function call for limiting lifetimes. */
1559 free_INSN_LIST_list (&deps
->last_function_call
);
1560 deps
->last_function_call
= alloc_INSN_LIST (insn
, NULL_RTX
);
1562 /* Before reload, begin a post-call group, so as to keep the
1563 lifetimes of hard registers correct. */
1564 if (! reload_completed
)
1565 deps
->in_post_call_group_p
= post_call
;
      /* EH_REGION insn notes cannot appear until well after we complete
	 scheduling.  */
1571 gcc_assert (NOTE_LINE_NUMBER (insn
) != NOTE_INSN_EH_REGION_BEG
1572 && NOTE_LINE_NUMBER (insn
) != NOTE_INSN_EH_REGION_END
);
1574 if (current_sched_info
->use_cselib
)
1575 cselib_process_insn (insn
);
      /* Now that we have completed handling INSN, check and see if it is
	 a CLOBBER beginning a libcall block.  If it is, record the
	 end of the libcall sequence.

	 We want to schedule libcall blocks as a unit before reload.  While
	 this restricts scheduling, it preserves the meaning of a libcall
	 block.

	 As a side effect, we may get better code due to decreased register
	 pressure as well as less chance of a foreign insn appearing in
	 a libcall block.  */
1588 if (!reload_completed
1589 /* Note we may have nested libcall sequences. We only care about
1590 the outermost libcall sequence. */
1591 && deps
->libcall_block_tail_insn
== 0
1592 /* The sequence must start with a clobber of a register. */
1593 && NONJUMP_INSN_P (insn
)
1594 && GET_CODE (PATTERN (insn
)) == CLOBBER
1595 && (r0
= XEXP (PATTERN (insn
), 0), REG_P (r0
))
1596 && REG_P (XEXP (PATTERN (insn
), 0))
1597 /* The CLOBBER must also have a REG_LIBCALL note attached. */
1598 && (link
= find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
)) != 0
1599 && (end_seq
= XEXP (link
, 0)) != 0
1600 /* The insn referenced by the REG_LIBCALL note must be a
1601 simple nop copy with the same destination as the register
1602 mentioned in the clobber. */
1603 && (set
= single_set (end_seq
)) != 0
1604 && SET_DEST (set
) == r0
&& SET_SRC (set
) == r0
1605 /* And finally the insn referenced by the REG_LIBCALL must
1606 also contain a REG_EQUAL note and a REG_RETVAL note. */
1607 && find_reg_note (end_seq
, REG_EQUAL
, NULL_RTX
) != 0
1608 && find_reg_note (end_seq
, REG_RETVAL
, NULL_RTX
) != 0)
1609 deps
->libcall_block_tail_insn
= XEXP (link
, 0);
      /* If we have reached the end of a libcall block, then close the
	 block.  */
1613 if (deps
->libcall_block_tail_insn
== insn
)
1614 deps
->libcall_block_tail_insn
= 0;
1618 if (current_sched_info
->use_cselib
)
/* The following function adds a forward dependence (FROM, TO) with
   the given DEP_TYPE.  The forward dependence should not already exist.  */
1631 add_forw_dep (rtx to
, rtx link
)
1635 from
= XEXP (link
, 0);
1637 #ifdef ENABLE_CHECKING
1638 /* If add_dependence is working properly there should never
1639 be notes, deleted insns or duplicates in the backward
1640 links. Thus we need not check for them here.
1642 However, if we have enabled checking we might as well go
1643 ahead and verify that add_dependence worked properly. */
1644 gcc_assert (INSN_P (from
));
1645 gcc_assert (!INSN_DELETED_P (from
));
1646 if (true_dependency_cache
)
1648 gcc_assert (!bitmap_bit_p (&forward_dependency_cache
[INSN_LUID (from
)],
1650 bitmap_set_bit (&forward_dependency_cache
[INSN_LUID (from
)],
1654 gcc_assert (!find_insn_list (to
, INSN_DEPEND (from
)));
1657 if (!(current_sched_info
->flags
& USE_DEPS_LIST
))
1658 new_link
= alloc_INSN_LIST (to
, INSN_DEPEND (from
));
1660 new_link
= alloc_DEPS_LIST (to
, INSN_DEPEND (from
), DEP_STATUS (link
));
1662 PUT_REG_NOTE_KIND (new_link
, REG_NOTE_KIND (link
));
1664 INSN_DEPEND (from
) = new_link
;
1665 INSN_DEP_COUNT (to
) += 1;
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */
1673 compute_forward_dependences (rtx head
, rtx tail
)
1678 next_tail
= NEXT_INSN (tail
);
1679 for (insn
= head
; insn
!= next_tail
; insn
= NEXT_INSN (insn
))
1683 if (! INSN_P (insn
))
1686 if (current_sched_info
->flags
& DO_SPECULATION
)
1688 rtx
new = 0, link
, next
;
1690 for (link
= LOG_LINKS (insn
); link
; link
= next
)
1692 next
= XEXP (link
, 1);
1693 adjust_add_sorted_back_dep (insn
, link
, &new);
1696 LOG_LINKS (insn
) = new;
1699 for (link
= LOG_LINKS (insn
); link
; link
= XEXP (link
, 1))
1700 add_forw_dep (insn
, link
);
1704 /* Initialize variables for region data dependence analysis.
1705 n_bbs is the number of region blocks. */
1708 init_deps (struct deps
*deps
)
1710 int max_reg
= (reload_completed
? FIRST_PSEUDO_REGISTER
: max_reg_num ());
1712 deps
->max_reg
= max_reg
;
1713 deps
->reg_last
= XCNEWVEC (struct deps_reg
, max_reg
);
1714 INIT_REG_SET (&deps
->reg_last_in_use
);
1715 INIT_REG_SET (&deps
->reg_conditional_sets
);
1717 deps
->pending_read_insns
= 0;
1718 deps
->pending_read_mems
= 0;
1719 deps
->pending_write_insns
= 0;
1720 deps
->pending_write_mems
= 0;
1721 deps
->pending_lists_length
= 0;
1722 deps
->pending_flush_length
= 0;
1723 deps
->last_pending_memory_flush
= 0;
1724 deps
->last_function_call
= 0;
1725 deps
->sched_before_next_call
= 0;
1726 deps
->in_post_call_group_p
= not_post_call
;
1727 deps
->libcall_block_tail_insn
= 0;
1730 /* Free insn lists found in DEPS. */
1733 free_deps (struct deps
*deps
)
1736 reg_set_iterator rsi
;
1738 free_INSN_LIST_list (&deps
->pending_read_insns
);
1739 free_EXPR_LIST_list (&deps
->pending_read_mems
);
1740 free_INSN_LIST_list (&deps
->pending_write_insns
);
1741 free_EXPR_LIST_list (&deps
->pending_write_mems
);
1742 free_INSN_LIST_list (&deps
->last_pending_memory_flush
);
1744 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
1745 times. For a testcase with 42000 regs and 8000 small basic blocks,
1746 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
1747 EXECUTE_IF_SET_IN_REG_SET (&deps
->reg_last_in_use
, 0, i
, rsi
)
1749 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1751 free_INSN_LIST_list (®_last
->uses
);
1753 free_INSN_LIST_list (®_last
->sets
);
1754 if (reg_last
->clobbers
)
1755 free_INSN_LIST_list (®_last
->clobbers
);
1757 CLEAR_REG_SET (&deps
->reg_last_in_use
);
1758 CLEAR_REG_SET (&deps
->reg_conditional_sets
);
1760 free (deps
->reg_last
);
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled;
   it is used in the estimate of profitability.  */
1768 init_dependency_caches (int luid
)
1770 /* ?!? We could save some memory by computing a per-region luid mapping
1771 which could reduce both the number of vectors in the cache and the size
1772 of each vector. Instead we just avoid the cache entirely unless the
1773 average number of instructions in a basic block is very high. See
1774 the comment before the declaration of true_dependency_cache for
1775 what we consider "very high". */
1776 if (luid
/ n_basic_blocks
> 100 * 5)
1779 extend_dependency_caches (luid
, true);
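  /* Worked example (added for clarity; the numbers are hypothetical):
     with luid == 60000 insns and n_basic_blocks == 100, the average block
     holds 600 insns, 60000 / 100 > 100 * 5 holds, and the bitmap caches are
     allocated; for a more typical ratio of a handful of insns per block the
     test fails and the plain LOG_LINKS walk is used instead.  */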
/* Create or extend (depending on CREATE_P) dependency caches to
   size N.  */
1786 extend_dependency_caches (int n
, bool create_p
)
1788 if (create_p
|| true_dependency_cache
)
1790 int i
, luid
= cache_size
+ n
;
1792 true_dependency_cache
= XRESIZEVEC (bitmap_head
, true_dependency_cache
,
1794 output_dependency_cache
= XRESIZEVEC (bitmap_head
,
1795 output_dependency_cache
, luid
);
1796 anti_dependency_cache
= XRESIZEVEC (bitmap_head
, anti_dependency_cache
,
1798 #ifdef ENABLE_CHECKING
1799 forward_dependency_cache
= XRESIZEVEC (bitmap_head
,
1800 forward_dependency_cache
, luid
);
1802 if (current_sched_info
->flags
& DO_SPECULATION
)
1803 spec_dependency_cache
= XRESIZEVEC (bitmap_head
, spec_dependency_cache
,
1806 for (i
= cache_size
; i
< luid
; i
++)
1808 bitmap_initialize (&true_dependency_cache
[i
], 0);
1809 bitmap_initialize (&output_dependency_cache
[i
], 0);
1810 bitmap_initialize (&anti_dependency_cache
[i
], 0);
1811 #ifdef ENABLE_CHECKING
1812 bitmap_initialize (&forward_dependency_cache
[i
], 0);
1814 if (current_sched_info
->flags
& DO_SPECULATION
)
1815 bitmap_initialize (&spec_dependency_cache
[i
], 0);
1821 /* Free the caches allocated in init_dependency_caches. */
1824 free_dependency_caches (void)
1826 if (true_dependency_cache
)
1830 for (i
= 0; i
< cache_size
; i
++)
1832 bitmap_clear (&true_dependency_cache
[i
]);
1833 bitmap_clear (&output_dependency_cache
[i
]);
1834 bitmap_clear (&anti_dependency_cache
[i
]);
1835 #ifdef ENABLE_CHECKING
1836 bitmap_clear (&forward_dependency_cache
[i
]);
1838 if (current_sched_info
->flags
& DO_SPECULATION
)
1839 bitmap_clear (&spec_dependency_cache
[i
]);
1841 free (true_dependency_cache
);
1842 true_dependency_cache
= NULL
;
1843 free (output_dependency_cache
);
1844 output_dependency_cache
= NULL
;
1845 free (anti_dependency_cache
);
1846 anti_dependency_cache
= NULL
;
1847 #ifdef ENABLE_CHECKING
1848 free (forward_dependency_cache
);
1849 forward_dependency_cache
= NULL
;
1851 if (current_sched_info
->flags
& DO_SPECULATION
)
1853 free (spec_dependency_cache
);
1854 spec_dependency_cache
= NULL
;
/* Initialize some global variables needed by the dependency analysis
   code.  */
1863 init_deps_global (void)
1865 reg_pending_sets
= ALLOC_REG_SET (®_obstack
);
1866 reg_pending_clobbers
= ALLOC_REG_SET (®_obstack
);
1867 reg_pending_uses
= ALLOC_REG_SET (®_obstack
);
1868 reg_pending_barrier
= NOT_A_BARRIER
;
1871 /* Free everything used by the dependency analysis code. */
1874 finish_deps_global (void)
1876 FREE_REG_SET (reg_pending_sets
);
1877 FREE_REG_SET (reg_pending_clobbers
);
1878 FREE_REG_SET (reg_pending_uses
);
1881 /* Insert LINK into the dependence chain pointed to by LINKP and
1882 maintain the sort order. */
1884 adjust_add_sorted_back_dep (rtx insn
, rtx link
, rtx
*linkp
)
1886 gcc_assert (current_sched_info
->flags
& DO_SPECULATION
);
  /* If the insn cannot move speculatively, but the link is speculative,
     make it a hard dependence.  */
1890 if (HAS_INTERNAL_DEP (insn
)
1891 && (DEP_STATUS (link
) & SPECULATIVE
))
1893 DEP_STATUS (link
) &= ~SPECULATIVE
;
1895 if (true_dependency_cache
)
1896 bitmap_clear_bit (&spec_dependency_cache
[INSN_LUID (insn
)],
1897 INSN_LUID (XEXP (link
, 0)));
1900 /* Non-speculative links go at the head of LOG_LINKS, followed by
1901 speculative links. */
1902 if (DEP_STATUS (link
) & SPECULATIVE
)
1903 while (*linkp
&& !(DEP_STATUS (*linkp
) & SPECULATIVE
))
1904 linkp
= &XEXP (*linkp
, 1);
1906 XEXP (link
, 1) = *linkp
;
/* Move the dependence pointed to by LINKP to the back dependencies
   of INSN, and also add this dependence to the forward ones.  All LOG_LINKS,
   except the one pointed to by LINKP, must be sorted.  */
1914 adjust_back_add_forw_dep (rtx insn
, rtx
*linkp
)
1918 gcc_assert (current_sched_info
->flags
& DO_SPECULATION
);
1921 *linkp
= XEXP (*linkp
, 1);
1923 adjust_add_sorted_back_dep (insn
, link
, &LOG_LINKS (insn
));
1924 add_forw_dep (insn
, link
);
1927 /* Remove forward dependence ELEM from the DEPS_LIST of INSN. */
1929 delete_forw_dep (rtx insn
, rtx elem
)
1931 gcc_assert (current_sched_info
->flags
& DO_SPECULATION
);
1933 #ifdef ENABLE_CHECKING
1934 if (true_dependency_cache
)
1935 bitmap_clear_bit (&forward_dependency_cache
[INSN_LUID (elem
)],
1939 remove_free_DEPS_LIST_elem (insn
, &INSN_DEPEND (elem
));
1940 INSN_DEP_COUNT (insn
)--;
1943 /* Estimate the weakness of dependence between MEM1 and MEM2. */
1945 estimate_dep_weak (rtx mem1
, rtx mem2
)
1950 /* MEMs are the same - don't speculate. */
1951 return MIN_DEP_WEAK
;
1953 r1
= XEXP (mem1
, 0);
1954 r2
= XEXP (mem2
, 0);
1957 || (REG_P (r1
) && REG_P (r2
)
1958 && REGNO (r1
) == REGNO (r2
)))
1959 /* Again, MEMs are the same. */
1960 return MIN_DEP_WEAK
;
1961 else if ((REG_P (r1
) && !REG_P (r2
))
1962 || (!REG_P (r1
) && REG_P (r2
)))
1963 /* Different addressing modes - reason to be more speculative,
1965 return NO_DEP_WEAK
- (NO_DEP_WEAK
- UNCERTAIN_DEP_WEAK
) / 2;
1967 /* We can't say anything about the dependence. */
1968 return UNCERTAIN_DEP_WEAK
;
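
/* Illustrative note (added for clarity; see sched-int.h for the actual
   constants): the returned weakness is the scheduler's estimate of how
   unlikely the two MEMs are to conflict.  MIN_DEP_WEAK means "assume they
   always conflict, don't speculate", UNCERTAIN_DEP_WEAK is the neutral
   default, and the value halfway between UNCERTAIN_DEP_WEAK and NO_DEP_WEAK
   returned above expresses "different addressing modes, so a conflict is
   less likely than in the uncertain case".  */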
/* Add or update a backward dependence between INSN and ELEM with type
   DEP_TYPE.  This function can handle the case where INSN == ELEM.
   It is a convenience wrapper.  */
1975 add_dependence (rtx insn
, rtx elem
, enum reg_note dep_type
)
1979 if (dep_type
== REG_DEP_TRUE
)
1981 else if (dep_type
== REG_DEP_OUTPUT
)
1983 else if (dep_type
== REG_DEP_ANTI
)
1988 maybe_add_or_update_back_dep_1 (insn
, elem
, dep_type
, ds
, 0, 0, 0);
1991 /* Add or update backward dependence between INSN and ELEM
1992 with given type DEP_TYPE and dep_status DS.
1993 This function is a convenience wrapper. */
1994 enum DEPS_ADJUST_RESULT
1995 add_or_update_back_dep (rtx insn
, rtx elem
, enum reg_note dep_type
, ds_t ds
)
1997 return add_or_update_back_dep_1 (insn
, elem
, dep_type
, ds
, 0, 0, 0);
2000 /* Add or update both backward and forward dependencies between INSN and ELEM
2001 with given type DEP_TYPE and dep_status DS. */
2003 add_or_update_back_forw_dep (rtx insn
, rtx elem
, enum reg_note dep_type
,
2006 enum DEPS_ADJUST_RESULT res
;
2009 res
= add_or_update_back_dep_1 (insn
, elem
, dep_type
, ds
, 0, 0, &linkp
);
2011 if (res
== DEP_CHANGED
|| res
== DEP_CREATED
)
2013 if (res
== DEP_CHANGED
)
2014 delete_forw_dep (insn
, elem
);
2015 else if (res
== DEP_CREATED
)
2016 linkp
= &LOG_LINKS (insn
);
2018 adjust_back_add_forw_dep (insn
, linkp
);
2022 /* Add both backward and forward dependencies between INSN and ELEM
2023 with given type DEP_TYPE and dep_status DS. */
2025 add_back_forw_dep (rtx insn
, rtx elem
, enum reg_note dep_type
, ds_t ds
)
2027 add_back_dep (insn
, elem
, dep_type
, ds
);
2028 adjust_back_add_forw_dep (insn
, &LOG_LINKS (insn
));
2031 /* Remove both backward and forward dependencies between INSN and ELEM. */
2033 delete_back_forw_dep (rtx insn
, rtx elem
)
2035 gcc_assert (current_sched_info
->flags
& DO_SPECULATION
);
2037 if (true_dependency_cache
!= NULL
)
2039 bitmap_clear_bit (&true_dependency_cache
[INSN_LUID (insn
)],
2041 bitmap_clear_bit (&anti_dependency_cache
[INSN_LUID (insn
)],
2043 bitmap_clear_bit (&output_dependency_cache
[INSN_LUID (insn
)],
2045 bitmap_clear_bit (&spec_dependency_cache
[INSN_LUID (insn
)],
2049 remove_free_DEPS_LIST_elem (elem
, &LOG_LINKS (insn
));
2050 delete_forw_dep (insn
, elem
);
2053 /* Return weakness of speculative type TYPE in the dep_status DS. */
2055 get_dep_weak (ds_t ds
, ds_t type
)
2060 case BEGIN_DATA
: ds
>>= BEGIN_DATA_BITS_OFFSET
; break;
2061 case BE_IN_DATA
: ds
>>= BE_IN_DATA_BITS_OFFSET
; break;
2062 case BEGIN_CONTROL
: ds
>>= BEGIN_CONTROL_BITS_OFFSET
; break;
2063 case BE_IN_CONTROL
: ds
>>= BE_IN_CONTROL_BITS_OFFSET
; break;
2064 default: gcc_unreachable ();
2067 gcc_assert (MIN_DEP_WEAK
<= ds
&& ds
<= MAX_DEP_WEAK
);
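
/* Illustrative sketch (added for clarity; the field offsets and widths are
   defined in sched-int.h): a ds_t packs one small weakness field per
   speculative type, so after masking DS down to the bits of TYPE, shifting
   right by the type's bit offset (as the switch above does) leaves just that
   type's weakness, which the assertion then range-checks; set_dep_weak below
   performs the inverse, shifting a weakness left into the same field.  */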
/* Return the dep_status, which has the same parameters as DS, except that
   speculative type TYPE will have weakness DW.  */
2074 set_dep_weak (ds_t ds
, ds_t type
, dw_t dw
)
2076 gcc_assert (MIN_DEP_WEAK
<= dw
&& dw
<= MAX_DEP_WEAK
);
2081 case BEGIN_DATA
: ds
|= ((ds_t
) dw
) << BEGIN_DATA_BITS_OFFSET
; break;
2082 case BE_IN_DATA
: ds
|= ((ds_t
) dw
) << BE_IN_DATA_BITS_OFFSET
; break;
2083 case BEGIN_CONTROL
: ds
|= ((ds_t
) dw
) << BEGIN_CONTROL_BITS_OFFSET
; break;
2084 case BE_IN_CONTROL
: ds
|= ((ds_t
) dw
) << BE_IN_CONTROL_BITS_OFFSET
; break;
2085 default: gcc_unreachable ();
2090 /* Return the join of two dep_statuses DS1 and DS2. */
2092 ds_merge (ds_t ds1
, ds_t ds2
)
2096 gcc_assert ((ds1
& SPECULATIVE
) && (ds2
& SPECULATIVE
));
2098 ds
= (ds1
& DEP_TYPES
) | (ds2
& DEP_TYPES
);
2100 t
= FIRST_SPEC_TYPE
;
2103 if ((ds1
& t
) && !(ds2
& t
))
2105 else if (!(ds1
& t
) && (ds2
& t
))
2107 else if ((ds1
& t
) && (ds2
& t
))
2111 dw
= ((ds_t
) get_dep_weak (ds1
, t
)) * ((ds_t
) get_dep_weak (ds2
, t
));
2113 if (dw
< MIN_DEP_WEAK
)
2116 ds
= set_dep_weak (ds
, t
, (dw_t
) dw
);
2119 if (t
== LAST_SPEC_TYPE
)
2121 t
<<= SPEC_TYPE_SHIFT
;
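      /* Illustrative note (added for clarity): when both statuses carry the
	 same speculative type, the code above multiplies the two weaknesses,
	 treating each as an independent probability that the speculation
	 succeeds, and the `dw < MIN_DEP_WEAK' test keeps the merged value
	 inside the valid weakness range before set_dep_weak stores it back
	 into the merged status.  */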
2128 #ifdef INSN_SCHEDULING
2129 #ifdef ENABLE_CHECKING
2130 /* Verify that dependence type and status are consistent.
2131 If RELAXED_P is true, then skip dep_weakness checks. */
2133 check_dep_status (enum reg_note dt
, ds_t ds
, bool relaxed_p
)
2135 /* Check that dependence type contains the same bits as the status. */
2136 if (dt
== REG_DEP_TRUE
)
2137 gcc_assert (ds
& DEP_TRUE
);
2138 else if (dt
== REG_DEP_OUTPUT
)
2139 gcc_assert ((ds
& DEP_OUTPUT
)
2140 && !(ds
& DEP_TRUE
));
2142 gcc_assert ((dt
== REG_DEP_ANTI
)
2144 && !(ds
& (DEP_OUTPUT
| DEP_TRUE
)));
  /* HARD_DEP cannot appear in the dep_status of a link.  */
2147 gcc_assert (!(ds
& HARD_DEP
));
  /* Check that dependence status is set correctly when speculation is not
     supported.  */
2151 if (!(current_sched_info
->flags
& DO_SPECULATION
))
2152 gcc_assert (!(ds
& SPECULATIVE
));
2153 else if (ds
& SPECULATIVE
)
2157 ds_t type
= FIRST_SPEC_TYPE
;
2159 /* Check that dependence weakness is in proper range. */
2163 get_dep_weak (ds
, type
);
2165 if (type
== LAST_SPEC_TYPE
)
2167 type
<<= SPEC_TYPE_SHIFT
;
2172 if (ds
& BEGIN_SPEC
)
2174 /* Only true dependence can be data speculative. */
2175 if (ds
& BEGIN_DATA
)
2176 gcc_assert (ds
& DEP_TRUE
);
2178 /* Control dependencies in the insn scheduler are represented by
2179 anti-dependencies, therefore only anti dependence can be
2180 control speculative. */
2181 if (ds
& BEGIN_CONTROL
)
2182 gcc_assert (ds
& DEP_ANTI
);
2186 /* Subsequent speculations should resolve true dependencies. */
2187 gcc_assert ((ds
& DEP_TYPES
) == DEP_TRUE
);
      /* Check that true and anti dependencies can't have other speculative
	 statuses.  */
2193 gcc_assert (ds
& (BEGIN_DATA
| BE_IN_SPEC
));
2194 /* An output dependence can't be speculative at all. */
2195 gcc_assert (!(ds
& DEP_OUTPUT
));
2197 gcc_assert (ds
& BEGIN_CONTROL
);