1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   5    Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to the Free
22 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "toplev.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "hard-reg-set.h"
33 #include "regs.h"
34 #include "function.h"
35 #include "flags.h"
36 #include "insn-config.h"
37 #include "insn-attr.h"
38 #include "except.h"
40 #include "recog.h"
41 #include "sched-int.h"
42 #include "params.h"
43 #include "cselib.h"
44 #include "df.h"
47 static regset reg_pending_sets;
48 static regset reg_pending_clobbers;
49 static regset reg_pending_uses;
51 /* The following enumeration values tell us what dependencies we
52 should use to implement the barrier. We use true-dependencies for
53 TRUE_BARRIER and anti-dependencies for MOVE_BARRIER. */
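/* Within this file, volatile asms, UNSPEC_VOLATILEs and jumps followed
   by a BARRIER give rise to TRUE_BARRIER, while setjmp calls, loop
   notes and insns that can throw internally give rise to MOVE_BARRIER
   (see the uses of reg_pending_barrier below).  */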
54 enum reg_pending_barrier_mode
56 NOT_A_BARRIER = 0,
57 MOVE_BARRIER,
58 TRUE_BARRIER
61 static enum reg_pending_barrier_mode reg_pending_barrier;
63 /* To speed up the test for duplicate dependency links we keep a
64 record of dependencies created by add_dependence when the average
65 number of instructions in a basic block is very large.
  67    Studies have shown that there are typically around 5 instructions between
68 branches for typical C code. So we can make a guess that the average
69 basic block is approximately 5 instructions long; we will choose 100X
70 the average size as a very large basic block.
72 Each insn has associated bitmaps for its dependencies. Each bitmap
73 has enough entries to represent a dependency on any other insn in
  74    the insn chain.  Once the bitmaps for the true dependency cache are
  75    allocated, the anti and output dependency caches are allocated too.  */
76 static bitmap_head *true_dependency_cache;
77 static bitmap_head *anti_dependency_cache;
78 static bitmap_head *output_dependency_cache;
79 static int cache_size;
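/* Each cache is an array of bitmaps indexed by the INSN_LUID of the
   dependent (later) insn; bit N in that bitmap is set once a dependence
   on the insn whose LUID is N has been recorded (see add_dependence).  */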
  81 /* To speed up checking the consistency of the forward insn
  82    dependencies we form, we use the following cache.  An alternative
  83    would be to switch off the check for duplicate insns in forward
  84    dependencies.  */
85 #ifdef ENABLE_CHECKING
86 static bitmap_head *forward_dependency_cache;
87 #endif
89 static int deps_may_trap_p (rtx);
90 static void add_dependence_list (rtx, rtx, enum reg_note);
91 static void add_dependence_list_and_free (rtx, rtx *, enum reg_note);
92 static void delete_all_dependences (rtx);
93 static void fixup_sched_groups (rtx);
95 static void flush_pending_lists (struct deps *, rtx, int, int);
96 static void sched_analyze_1 (struct deps *, rtx, rtx);
97 static void sched_analyze_2 (struct deps *, rtx, rtx);
98 static void sched_analyze_insn (struct deps *, rtx, rtx, rtx);
100 static rtx sched_get_condition (rtx);
101 static int conditions_mutex_p (rtx, rtx);
103 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
105 static int
106 deps_may_trap_p (rtx mem)
108 rtx addr = XEXP (mem, 0);
110 if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
112 rtx t = get_reg_known_value (REGNO (addr));
113 if (t)
114 addr = t;
116 return rtx_addr_can_trap_p (addr);
119 /* Return the INSN_LIST containing INSN in LIST, or NULL
120 if LIST does not contain INSN. */
123 find_insn_list (rtx insn, rtx list)
125 while (list)
127 if (XEXP (list, 0) == insn)
128 return list;
129 list = XEXP (list, 1);
131 return 0;
134 /* Find the condition under which INSN is executed. */
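/* Illustrative example: an insn whose pattern is
   (cond_exec (ne (reg 100) (const_int 0)) (set (reg 101) (reg 102)))
   yields (ne (reg 100) (const_int 0)).  Extracting the condition of an
   ordinary conditional jump is currently disabled below.  */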
136 static rtx
137 sched_get_condition (rtx insn)
139 rtx pat = PATTERN (insn);
140 rtx src;
142 if (pat == 0)
143 return 0;
145 if (GET_CODE (pat) == COND_EXEC)
146 return COND_EXEC_TEST (pat);
148 if (!any_condjump_p (insn) || !onlyjump_p (insn))
149 return 0;
151 src = SET_SRC (pc_set (insn));
152 #if 0
153 /* The previous code here was completely invalid and could never extract
154 the condition from a jump. This code does the correct thing, but that
155 triggers latent bugs later in the scheduler on ports with conditional
156 execution. So this is disabled for now. */
157 if (XEXP (src, 2) == pc_rtx)
158 return XEXP (src, 0);
159 else if (XEXP (src, 1) == pc_rtx)
161 rtx cond = XEXP (src, 0);
162 enum rtx_code revcode = reversed_comparison_code (cond, insn);
164 if (revcode == UNKNOWN)
165 return 0;
166 return gen_rtx_fmt_ee (revcode, GET_MODE (cond), XEXP (cond, 0),
167 XEXP (cond, 1));
169 #endif
171 return 0;
174 /* Return nonzero if conditions COND1 and COND2 can never be both true. */
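/* E.g. (eq (reg 100) (const_int 0)) and (ne (reg 100) (const_int 0))
   are mutually exclusive: NE is the reversed comparison code of EQ and
   the operands match.  */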
176 static int
177 conditions_mutex_p (rtx cond1, rtx cond2)
179 if (COMPARISON_P (cond1)
180 && COMPARISON_P (cond2)
181 && GET_CODE (cond1) == reversed_comparison_code (cond2, NULL)
182 && XEXP (cond1, 0) == XEXP (cond2, 0)
183 && XEXP (cond1, 1) == XEXP (cond2, 1))
184 return 1;
185 return 0;
188 /* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
189 LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the
190 type of dependence that this link represents. The function returns
191 nonzero if a new entry has been added to insn's LOG_LINK. */
194 add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
196 rtx link;
197 int present_p;
198 rtx cond1, cond2;
200 /* Don't depend an insn on itself. */
201 if (insn == elem)
202 return 0;
204 /* We can get a dependency on deleted insns due to optimizations in
205 the register allocation and reloading or due to splitting. Any
206 such dependency is useless and can be ignored. */
207 if (NOTE_P (elem))
208 return 0;
210 /* flow.c doesn't handle conditional lifetimes entirely correctly;
211 calls mess up the conditional lifetimes. */
212 /* ??? add_dependence is the wrong place to be eliding dependencies,
213 as that forgets that the condition expressions themselves may
214 be dependent. */
215 if (!CALL_P (insn) && !CALL_P (elem))
217 cond1 = sched_get_condition (insn);
218 cond2 = sched_get_condition (elem);
219 if (cond1 && cond2
220 && conditions_mutex_p (cond1, cond2)
221 /* Make sure first instruction doesn't affect condition of second
222 instruction if switched. */
223 && !modified_in_p (cond1, elem)
224 /* Make sure second instruction doesn't affect condition of first
225 instruction if switched. */
226 && !modified_in_p (cond2, insn))
227 return 0;
230 present_p = 1;
231 #ifdef INSN_SCHEDULING
232 /* ??? No good way to tell from here whether we're doing interblock
233 scheduling. Possibly add another callback. */
234 #if 0
235 /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
236 No need for interblock dependences with calls, since
237 calls are not moved between blocks. Note: the edge where
238 elem is a CALL is still required. */
239 if (CALL_P (insn)
240 && (INSN_BB (elem) != INSN_BB (insn)))
241 return 0;
242 #endif
244 /* If we already have a dependency for ELEM, then we do not need to
245 do anything. Avoiding the list walk below can cut compile times
246 dramatically for some code. */
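/* Dependence kinds are compared numerically below: a smaller reg-note
   value is the more restrictive dependence, with 0 (a true dependence)
   the most restrictive of all.  */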
247 if (true_dependency_cache != NULL)
249 enum reg_note present_dep_type = 0;
251 gcc_assert (anti_dependency_cache);
252 gcc_assert (output_dependency_cache);
253 if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
254 INSN_LUID (elem)))
 255        /* Do nothing (present_dep_type is already 0).  */
257 else if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
258 INSN_LUID (elem)))
259 present_dep_type = REG_DEP_ANTI;
260 else if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
261 INSN_LUID (elem)))
262 present_dep_type = REG_DEP_OUTPUT;
263 else
264 present_p = 0;
265 if (present_p && (int) dep_type >= (int) present_dep_type)
266 return 0;
268 #endif
270 /* Check that we don't already have this dependence. */
271 if (present_p)
272 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
273 if (XEXP (link, 0) == elem)
275 #ifdef INSN_SCHEDULING
276 /* Clear corresponding cache entry because type of the link
277 may be changed. */
278 if (true_dependency_cache != NULL)
280 enum reg_note kind = REG_NOTE_KIND (link);
281 switch (kind)
283 case REG_DEP_ANTI:
284 bitmap_clear_bit (&anti_dependency_cache[INSN_LUID (insn)],
285 INSN_LUID (elem));
286 break;
287 case REG_DEP_OUTPUT:
288 gcc_assert (output_dependency_cache);
289 bitmap_clear_bit (&output_dependency_cache[INSN_LUID (insn)],
290 INSN_LUID (elem));
291 break;
292 default:
293 gcc_unreachable ();
296 #endif
298 /* If this is a more restrictive type of dependence than the existing
299 one, then change the existing dependence to this type. */
300 if ((int) dep_type < (int) REG_NOTE_KIND (link))
301 PUT_REG_NOTE_KIND (link, dep_type);
303 #ifdef INSN_SCHEDULING
304 /* If we are adding a dependency to INSN's LOG_LINKs, then
305 note that in the bitmap caches of dependency information. */
306 if (true_dependency_cache != NULL)
308 if ((int) REG_NOTE_KIND (link) == 0)
309 bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
310 INSN_LUID (elem));
311 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
312 bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
313 INSN_LUID (elem));
314 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
315 bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
316 INSN_LUID (elem));
318 #endif
319 return 0;
321 /* Might want to check one level of transitivity to save conses. */
323 link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
324 LOG_LINKS (insn) = link;
326 /* Insn dependency, not data dependency. */
327 PUT_REG_NOTE_KIND (link, dep_type);
329 #ifdef INSN_SCHEDULING
330 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
331 in the bitmap caches of dependency information. */
332 if (true_dependency_cache != NULL)
334 if ((int) dep_type == 0)
335 bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
336 else if (dep_type == REG_DEP_ANTI)
337 bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
338 else if (dep_type == REG_DEP_OUTPUT)
339 bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
341 #endif
342 return 1;
345 /* A convenience wrapper to operate on an entire list. */
347 static void
348 add_dependence_list (rtx insn, rtx list, enum reg_note dep_type)
350 for (; list; list = XEXP (list, 1))
351 add_dependence (insn, XEXP (list, 0), dep_type);
354 /* Similar, but free *LISTP at the same time. */
356 static void
357 add_dependence_list_and_free (rtx insn, rtx *listp, enum reg_note dep_type)
359 rtx list, next;
360 for (list = *listp, *listp = NULL; list ; list = next)
362 next = XEXP (list, 1);
363 add_dependence (insn, XEXP (list, 0), dep_type);
364 free_INSN_LIST_node (list);
368 /* Clear all dependencies for an insn. */
370 static void
371 delete_all_dependences (rtx insn)
373 /* Clear caches, if they exist, as well as free the dependence. */
375 #ifdef INSN_SCHEDULING
376 if (true_dependency_cache != NULL)
378 bitmap_clear (&true_dependency_cache[INSN_LUID (insn)]);
379 bitmap_clear (&anti_dependency_cache[INSN_LUID (insn)]);
380 bitmap_clear (&output_dependency_cache[INSN_LUID (insn)]);
382 #endif
384 free_INSN_LIST_list (&LOG_LINKS (insn));
387 /* All insns in a scheduling group except the first should only have
388 dependencies on the previous insn in the group. So we find the
389 first instruction in the scheduling group by walking the dependence
390 chains backwards. Then we add the dependencies for the group to
391 the previous nonnote insn. */
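/* A typical group on a cc0 target is a cc0 setter followed by the jump
   that uses cc0 (sched_analyze_2 marks the user with SCHED_GROUP_P);
   the group's dependencies end up on the nonnote insn that precedes
   the group.  */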
393 static void
394 fixup_sched_groups (rtx insn)
396 rtx link;
398 for (link = LOG_LINKS (insn); link ; link = XEXP (link, 1))
400 rtx i = insn;
403 i = prev_nonnote_insn (i);
405 if (XEXP (link, 0) == i)
406 goto next_link;
407 } while (SCHED_GROUP_P (i));
408 add_dependence (i, XEXP (link, 0), REG_NOTE_KIND (link));
409 next_link:;
412 delete_all_dependences (insn);
414 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote_insn (insn)))
415 add_dependence (insn, prev_nonnote_insn (insn), REG_DEP_ANTI);
418 /* Process an insn's memory dependencies. There are four kinds of
419 dependencies:
421 (0) read dependence: read follows read
422 (1) true dependence: read follows write
423 (2) anti dependence: write follows read
424 (3) output dependence: write follows write
426 We are careful to build only dependencies which actually exist, and
427 use transitivity to avoid building too many links. */
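/* For example, in the sequence  r1 = [mem]; [mem] = r2; [mem] = r3;
   the second insn has an anti dependence on the first (write after
   read), the third has an output dependence on the second (write after
   write), and a later read of [mem] would have a true dependence on the
   last write.  */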
429 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
430 The MEM is a memory reference contained within INSN, which we are saving
431 so that we can do memory aliasing on it. */
433 static void
434 add_insn_mem_dependence (struct deps *deps, rtx *insn_list, rtx *mem_list,
435 rtx insn, rtx mem)
437 rtx link;
439 link = alloc_INSN_LIST (insn, *insn_list);
440 *insn_list = link;
442 if (current_sched_info->use_cselib)
444 mem = shallow_copy_rtx (mem);
445 XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
447 link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
448 *mem_list = link;
450 deps->pending_lists_length++;
453 /* Make a dependency between every memory reference on the pending lists
454 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
455 dependencies for a read operation, similarly with FOR_WRITE. */
457 static void
458 flush_pending_lists (struct deps *deps, rtx insn, int for_read,
459 int for_write)
461 if (for_write)
463 add_dependence_list_and_free (insn, &deps->pending_read_insns,
464 REG_DEP_ANTI);
465 free_EXPR_LIST_list (&deps->pending_read_mems);
468 add_dependence_list_and_free (insn, &deps->pending_write_insns,
469 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
470 free_EXPR_LIST_list (&deps->pending_write_mems);
471 deps->pending_lists_length = 0;
473 add_dependence_list_and_free (insn, &deps->last_pending_memory_flush,
474 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
475 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
476 deps->pending_flush_length = 1;
479 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
480 rtx, X, creating all dependencies generated by the write to the
481 destination of X, and reads of everything mentioned. */
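/* E.g. for (set (mem (reg 100)) (reg 101)) this records a pending
   write of that memory location and, via sched_analyze_2, reads of
   (reg 100) (the address) and (reg 101) (the stored value).  */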
483 static void
484 sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
486 int regno;
487 rtx dest = XEXP (x, 0);
488 enum rtx_code code = GET_CODE (x);
490 if (dest == 0)
491 return;
493 if (GET_CODE (dest) == PARALLEL)
495 int i;
497 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
498 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
499 sched_analyze_1 (deps,
500 gen_rtx_CLOBBER (VOIDmode,
501 XEXP (XVECEXP (dest, 0, i), 0)),
502 insn);
504 if (GET_CODE (x) == SET)
505 sched_analyze_2 (deps, SET_SRC (x), insn);
506 return;
509 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
510 || GET_CODE (dest) == ZERO_EXTRACT)
512 if (GET_CODE (dest) == STRICT_LOW_PART
513 || GET_CODE (dest) == ZERO_EXTRACT
514 || read_modify_subreg_p (dest))
516 /* These both read and modify the result. We must handle
517 them as writes to get proper dependencies for following
518 instructions. We must handle them as reads to get proper
519 dependencies from this to previous instructions.
520 Thus we need to call sched_analyze_2. */
522 sched_analyze_2 (deps, XEXP (dest, 0), insn);
524 if (GET_CODE (dest) == ZERO_EXTRACT)
526 /* The second and third arguments are values read by this insn. */
527 sched_analyze_2 (deps, XEXP (dest, 1), insn);
528 sched_analyze_2 (deps, XEXP (dest, 2), insn);
530 dest = XEXP (dest, 0);
533 if (REG_P (dest))
535 regno = REGNO (dest);
537 /* A hard reg in a wide mode may really be multiple registers.
538 If so, mark all of them just like the first. */
539 if (regno < FIRST_PSEUDO_REGISTER)
541 int i = hard_regno_nregs[regno][GET_MODE (dest)];
542 if (code == SET)
544 while (--i >= 0)
545 SET_REGNO_REG_SET (reg_pending_sets, regno + i);
547 else
549 while (--i >= 0)
550 SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
553 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
554 it does not reload. Ignore these as they have served their
555 purpose already. */
556 else if (regno >= deps->max_reg)
558 gcc_assert (GET_CODE (PATTERN (insn)) == USE
559 || GET_CODE (PATTERN (insn)) == CLOBBER);
561 else
563 if (code == SET)
564 SET_REGNO_REG_SET (reg_pending_sets, regno);
565 else
566 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
568 /* Pseudos that are REG_EQUIV to something may be replaced
569 by that during reloading. We need only add dependencies for
570 the address in the REG_EQUIV note. */
571 if (!reload_completed && get_reg_known_equiv_p (regno))
573 rtx t = get_reg_known_value (regno);
574 if (MEM_P (t))
575 sched_analyze_2 (deps, XEXP (t, 0), insn);
578 /* Don't let it cross a call after scheduling if it doesn't
579 already cross one. */
580 if (REG_N_CALLS_CROSSED (regno) == 0)
581 add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI);
584 else if (MEM_P (dest))
586 /* Writing memory. */
587 rtx t = dest;
589 if (current_sched_info->use_cselib)
591 t = shallow_copy_rtx (dest);
592 cselib_lookup (XEXP (t, 0), Pmode, 1);
593 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
595 t = canon_rtx (t);
597 if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
599 /* Flush all pending reads and writes to prevent the pending lists
600 from getting any larger. Insn scheduling runs too slowly when
601 these lists get long. When compiling GCC with itself,
602 this flush occurs 8 times for sparc, and 10 times for m88k using
603 the default value of 32. */
604 flush_pending_lists (deps, insn, false, true);
606 else
608 rtx pending, pending_mem;
610 pending = deps->pending_read_insns;
611 pending_mem = deps->pending_read_mems;
612 while (pending)
614 if (anti_dependence (XEXP (pending_mem, 0), t))
615 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
617 pending = XEXP (pending, 1);
618 pending_mem = XEXP (pending_mem, 1);
621 pending = deps->pending_write_insns;
622 pending_mem = deps->pending_write_mems;
623 while (pending)
625 if (output_dependence (XEXP (pending_mem, 0), t))
626 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
628 pending = XEXP (pending, 1);
629 pending_mem = XEXP (pending_mem, 1);
632 add_dependence_list (insn, deps->last_pending_memory_flush,
633 REG_DEP_ANTI);
635 add_insn_mem_dependence (deps, &deps->pending_write_insns,
636 &deps->pending_write_mems, insn, dest);
638 sched_analyze_2 (deps, XEXP (dest, 0), insn);
641 /* Analyze reads. */
642 if (GET_CODE (x) == SET)
643 sched_analyze_2 (deps, SET_SRC (x), insn);
646 /* Analyze the uses of memory and registers in rtx X in INSN. */
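/* E.g. for (plus (reg 100) (mem (reg 101))) this marks regs 100 and
   101 as pending uses and records a pending read of the MEM.  */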
648 static void
649 sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
651 int i;
652 int j;
653 enum rtx_code code;
654 const char *fmt;
656 if (x == 0)
657 return;
659 code = GET_CODE (x);
661 switch (code)
663 case CONST_INT:
664 case CONST_DOUBLE:
665 case CONST_VECTOR:
666 case SYMBOL_REF:
667 case CONST:
668 case LABEL_REF:
669 /* Ignore constants. Note that we must handle CONST_DOUBLE here
670 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
671 this does not mean that this insn is using cc0. */
672 return;
674 #ifdef HAVE_cc0
675 case CC0:
676 /* User of CC0 depends on immediately preceding insn. */
677 SCHED_GROUP_P (insn) = 1;
678 /* Don't move CC0 setter to another block (it can set up the
679 same flag for previous CC0 users which is safe). */
680 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
681 return;
682 #endif
684 case REG:
686 int regno = REGNO (x);
687 if (regno < FIRST_PSEUDO_REGISTER)
689 int i = hard_regno_nregs[regno][GET_MODE (x)];
690 while (--i >= 0)
691 SET_REGNO_REG_SET (reg_pending_uses, regno + i);
693 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
694 it does not reload. Ignore these as they have served their
695 purpose already. */
696 else if (regno >= deps->max_reg)
698 gcc_assert (GET_CODE (PATTERN (insn)) == USE
699 || GET_CODE (PATTERN (insn)) == CLOBBER);
701 else
703 SET_REGNO_REG_SET (reg_pending_uses, regno);
705 /* Pseudos that are REG_EQUIV to something may be replaced
706 by that during reloading. We need only add dependencies for
707 the address in the REG_EQUIV note. */
708 if (!reload_completed && get_reg_known_equiv_p (regno))
710 rtx t = get_reg_known_value (regno);
711 if (MEM_P (t))
712 sched_analyze_2 (deps, XEXP (t, 0), insn);
715 /* If the register does not already cross any calls, then add this
716 insn to the sched_before_next_call list so that it will still
717 not cross calls after scheduling. */
718 if (REG_N_CALLS_CROSSED (regno) == 0)
719 deps->sched_before_next_call
720 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
722 return;
725 case MEM:
727 /* Reading memory. */
728 rtx u;
729 rtx pending, pending_mem;
730 rtx t = x;
732 if (current_sched_info->use_cselib)
734 t = shallow_copy_rtx (t);
735 cselib_lookup (XEXP (t, 0), Pmode, 1);
736 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
738 t = canon_rtx (t);
739 pending = deps->pending_read_insns;
740 pending_mem = deps->pending_read_mems;
741 while (pending)
743 if (read_dependence (XEXP (pending_mem, 0), t))
744 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
746 pending = XEXP (pending, 1);
747 pending_mem = XEXP (pending_mem, 1);
750 pending = deps->pending_write_insns;
751 pending_mem = deps->pending_write_mems;
752 while (pending)
754 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
755 t, rtx_varies_p))
756 add_dependence (insn, XEXP (pending, 0), 0);
758 pending = XEXP (pending, 1);
759 pending_mem = XEXP (pending_mem, 1);
762 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
763 if (!JUMP_P (XEXP (u, 0))
764 || deps_may_trap_p (x))
765 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
767 /* Always add these dependencies to pending_reads, since
768 this insn may be followed by a write. */
769 add_insn_mem_dependence (deps, &deps->pending_read_insns,
770 &deps->pending_read_mems, insn, x);
772 /* Take advantage of tail recursion here. */
773 sched_analyze_2 (deps, XEXP (x, 0), insn);
774 return;
777 /* Force pending stores to memory in case a trap handler needs them. */
778 case TRAP_IF:
779 flush_pending_lists (deps, insn, true, false);
780 break;
782 case ASM_OPERANDS:
783 case ASM_INPUT:
784 case UNSPEC_VOLATILE:
786 /* Traditional and volatile asm instructions must be considered to use
787 and clobber all hard registers, all pseudo-registers and all of
788 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
790 Consider for instance a volatile asm that changes the fpu rounding
791 mode. An insn should not be moved across this even if it only uses
792 pseudo-regs because it might give an incorrectly rounded result. */
793 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
794 reg_pending_barrier = TRUE_BARRIER;
796 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
 797         We cannot just fall through here since then we would be confused
 798         by the ASM_INPUT rtxs inside ASM_OPERANDS, which do not indicate
 799         traditional asms the way a top-level ASM_INPUT does.  */
801 if (code == ASM_OPERANDS)
803 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
804 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
805 return;
807 break;
810 case PRE_DEC:
811 case POST_DEC:
812 case PRE_INC:
813 case POST_INC:
814 /* These both read and modify the result. We must handle them as writes
815 to get proper dependencies for following instructions. We must handle
816 them as reads to get proper dependencies from this to previous
817 instructions. Thus we need to pass them to both sched_analyze_1
818 and sched_analyze_2. We must call sched_analyze_2 first in order
819 to get the proper antecedent for the read. */
820 sched_analyze_2 (deps, XEXP (x, 0), insn);
821 sched_analyze_1 (deps, x, insn);
822 return;
824 case POST_MODIFY:
825 case PRE_MODIFY:
826 /* op0 = op0 + op1 */
827 sched_analyze_2 (deps, XEXP (x, 0), insn);
828 sched_analyze_2 (deps, XEXP (x, 1), insn);
829 sched_analyze_1 (deps, x, insn);
830 return;
832 default:
833 break;
836 /* Other cases: walk the insn. */
837 fmt = GET_RTX_FORMAT (code);
838 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
840 if (fmt[i] == 'e')
841 sched_analyze_2 (deps, XEXP (x, i), insn);
842 else if (fmt[i] == 'E')
843 for (j = 0; j < XVECLEN (x, i); j++)
844 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
848 /* Analyze an INSN with pattern X to find all dependencies. */
850 static void
851 sched_analyze_insn (struct deps *deps, rtx x, rtx insn, rtx loop_notes)
853 RTX_CODE code = GET_CODE (x);
854 rtx link;
855 unsigned i;
856 reg_set_iterator rsi;
858 if (code == COND_EXEC)
860 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
862 /* ??? Should be recording conditions so we reduce the number of
863 false dependencies. */
864 x = COND_EXEC_CODE (x);
865 code = GET_CODE (x);
867 if (code == SET || code == CLOBBER)
869 sched_analyze_1 (deps, x, insn);
871 /* Bare clobber insns are used for letting life analysis, reg-stack
872 and others know that a value is dead. Depend on the last call
873 instruction so that reg-stack won't get confused. */
874 if (code == CLOBBER)
875 add_dependence_list (insn, deps->last_function_call, REG_DEP_OUTPUT);
877 else if (code == PARALLEL)
879 for (i = XVECLEN (x, 0); i--;)
881 rtx sub = XVECEXP (x, 0, i);
882 code = GET_CODE (sub);
884 if (code == COND_EXEC)
886 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
887 sub = COND_EXEC_CODE (sub);
888 code = GET_CODE (sub);
890 if (code == SET || code == CLOBBER)
891 sched_analyze_1 (deps, sub, insn);
892 else
893 sched_analyze_2 (deps, sub, insn);
896 else
897 sched_analyze_2 (deps, x, insn);
899 /* Mark registers CLOBBERED or used by called function. */
900 if (CALL_P (insn))
902 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
904 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
905 sched_analyze_1 (deps, XEXP (link, 0), insn);
906 else
907 sched_analyze_2 (deps, XEXP (link, 0), insn);
909 if (find_reg_note (insn, REG_SETJMP, NULL))
910 reg_pending_barrier = MOVE_BARRIER;
913 if (JUMP_P (insn))
915 rtx next;
916 next = next_nonnote_insn (insn);
917 if (next && BARRIER_P (next))
918 reg_pending_barrier = TRUE_BARRIER;
919 else
921 rtx pending, pending_mem;
922 regset_head tmp_uses, tmp_sets;
923 INIT_REG_SET (&tmp_uses);
924 INIT_REG_SET (&tmp_sets);
926 (*current_sched_info->compute_jump_reg_dependencies)
927 (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
928 /* Make latency of jump equal to 0 by using anti-dependence. */
929 EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
931 struct deps_reg *reg_last = &deps->reg_last[i];
932 add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI);
933 add_dependence_list (insn, reg_last->clobbers, REG_DEP_ANTI);
934 reg_last->uses_length++;
935 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
937 IOR_REG_SET (reg_pending_sets, &tmp_sets);
939 CLEAR_REG_SET (&tmp_uses);
940 CLEAR_REG_SET (&tmp_sets);
942 /* All memory writes and volatile reads must happen before the
943 jump. Non-volatile reads must happen before the jump iff
944 the result is needed by the above register used mask. */
946 pending = deps->pending_write_insns;
947 pending_mem = deps->pending_write_mems;
948 while (pending)
950 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
951 pending = XEXP (pending, 1);
952 pending_mem = XEXP (pending_mem, 1);
955 pending = deps->pending_read_insns;
956 pending_mem = deps->pending_read_mems;
957 while (pending)
959 if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
960 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
961 pending = XEXP (pending, 1);
962 pending_mem = XEXP (pending_mem, 1);
965 add_dependence_list (insn, deps->last_pending_memory_flush,
966 REG_DEP_ANTI);
970 /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
971 block, then we must be sure that no instructions are scheduled across it.
972 Otherwise, the reg_n_refs info (which depends on loop_depth) would
973 become incorrect. */
974 if (loop_notes)
976 rtx link;
978 /* Update loop_notes with any notes from this insn. */
979 link = loop_notes;
980 while (XEXP (link, 1))
982 gcc_assert (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
983 || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END);
985 reg_pending_barrier = MOVE_BARRIER;
986 link = XEXP (link, 1);
988 XEXP (link, 1) = REG_NOTES (insn);
989 REG_NOTES (insn) = loop_notes;
992 /* If this instruction can throw an exception, then moving it changes
993 where block boundaries fall. This is mighty confusing elsewhere.
994 Therefore, prevent such an instruction from being moved. */
995 if (can_throw_internal (insn))
996 reg_pending_barrier = MOVE_BARRIER;
998 /* Add dependencies if a scheduling barrier was found. */
999 if (reg_pending_barrier)
1001       /* In the case of a barrier, most of the dependencies we add are
1002          not real ones, so we use anti-dependences here.  */
1003 if (GET_CODE (PATTERN (insn)) == COND_EXEC)
1005 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
1007 struct deps_reg *reg_last = &deps->reg_last[i];
1008 add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
1009 add_dependence_list
1010 (insn, reg_last->sets,
1011 reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
1012 add_dependence_list
1013 (insn, reg_last->clobbers,
1014 reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
1017 else
1019 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
1021 struct deps_reg *reg_last = &deps->reg_last[i];
1022 add_dependence_list_and_free (insn, &reg_last->uses,
1023 REG_DEP_ANTI);
1024 add_dependence_list_and_free
1025 (insn, &reg_last->sets,
1026 reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
1027 add_dependence_list_and_free
1028 (insn, &reg_last->clobbers,
1029 reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
1030 reg_last->uses_length = 0;
1031 reg_last->clobbers_length = 0;
1035 for (i = 0; i < (unsigned)deps->max_reg; i++)
1037 struct deps_reg *reg_last = &deps->reg_last[i];
1038 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1039 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1042 flush_pending_lists (deps, insn, true, true);
1043 CLEAR_REG_SET (&deps->reg_conditional_sets);
1044 reg_pending_barrier = NOT_A_BARRIER;
1046 else
1048 /* If the current insn is conditional, we can't free any
1049 of the lists. */
1050 if (GET_CODE (PATTERN (insn)) == COND_EXEC)
1052 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1054 struct deps_reg *reg_last = &deps->reg_last[i];
1055 add_dependence_list (insn, reg_last->sets, 0);
1056 add_dependence_list (insn, reg_last->clobbers, 0);
1057 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1058 reg_last->uses_length++;
1060 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
1062 struct deps_reg *reg_last = &deps->reg_last[i];
1063 add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
1064 add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
1065 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1066 reg_last->clobbers_length++;
1068 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
1070 struct deps_reg *reg_last = &deps->reg_last[i];
1071 add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
1072 add_dependence_list (insn, reg_last->clobbers, REG_DEP_OUTPUT);
1073 add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
1074 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1075 SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
1078 else
1080 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1082 struct deps_reg *reg_last = &deps->reg_last[i];
1083 add_dependence_list (insn, reg_last->sets, 0);
1084 add_dependence_list (insn, reg_last->clobbers, 0);
1085 reg_last->uses_length++;
1086 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1088 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
1090 struct deps_reg *reg_last = &deps->reg_last[i];
1091 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
1092 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
1094 add_dependence_list_and_free (insn, &reg_last->sets,
1095 REG_DEP_OUTPUT);
1096 add_dependence_list_and_free (insn, &reg_last->uses,
1097 REG_DEP_ANTI);
1098 add_dependence_list_and_free (insn, &reg_last->clobbers,
1099 REG_DEP_OUTPUT);
1100 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1101 reg_last->clobbers_length = 0;
1102 reg_last->uses_length = 0;
1104 else
1106 add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
1107 add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
1109 reg_last->clobbers_length++;
1110 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1112 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
1114 struct deps_reg *reg_last = &deps->reg_last[i];
1115 add_dependence_list_and_free (insn, &reg_last->sets,
1116 REG_DEP_OUTPUT);
1117 add_dependence_list_and_free (insn, &reg_last->clobbers,
1118 REG_DEP_OUTPUT);
1119 add_dependence_list_and_free (insn, &reg_last->uses,
1120 REG_DEP_ANTI);
1121 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1122 reg_last->uses_length = 0;
1123 reg_last->clobbers_length = 0;
1124 CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
1128 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
1129 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
1130 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
1132 CLEAR_REG_SET (reg_pending_uses);
1133 CLEAR_REG_SET (reg_pending_clobbers);
1134 CLEAR_REG_SET (reg_pending_sets);
1136 /* If we are currently in a libcall scheduling group, then mark the
1137 current insn as being in a scheduling group and that it can not
1138 be moved into a different basic block. */
1140 if (deps->libcall_block_tail_insn)
1142 SCHED_GROUP_P (insn) = 1;
1143 CANT_MOVE (insn) = 1;
1146 /* If a post-call group is still open, see if it should remain so.
1147 This insn must be a simple move of a hard reg to a pseudo or
1148 vice-versa.
1150 We must avoid moving these insns for correctness on
1151 SMALL_REGISTER_CLASS machines, and for special registers like
1152 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
1153 hard regs for all targets. */
1155 if (deps->in_post_call_group_p)
1157 rtx tmp, set = single_set (insn);
1158 int src_regno, dest_regno;
1160 if (set == NULL)
1161 goto end_call_group;
1163 tmp = SET_DEST (set);
1164 if (GET_CODE (tmp) == SUBREG)
1165 tmp = SUBREG_REG (tmp);
1166 if (REG_P (tmp))
1167 dest_regno = REGNO (tmp);
1168 else
1169 goto end_call_group;
1171 tmp = SET_SRC (set);
1172 if (GET_CODE (tmp) == SUBREG)
1173 tmp = SUBREG_REG (tmp);
1174 if ((GET_CODE (tmp) == PLUS
1175 || GET_CODE (tmp) == MINUS)
1176 && REG_P (XEXP (tmp, 0))
1177 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
1178 && dest_regno == STACK_POINTER_REGNUM)
1179 src_regno = STACK_POINTER_REGNUM;
1180 else if (REG_P (tmp))
1181 src_regno = REGNO (tmp);
1182 else
1183 goto end_call_group;
1185 if (src_regno < FIRST_PSEUDO_REGISTER
1186 || dest_regno < FIRST_PSEUDO_REGISTER)
1188 if (deps->in_post_call_group_p == post_call_initial)
1189 deps->in_post_call_group_p = post_call;
1191 SCHED_GROUP_P (insn) = 1;
1192 CANT_MOVE (insn) = 1;
1194 else
1196 end_call_group:
1197 deps->in_post_call_group_p = not_post_call;
1201 /* Fixup the dependencies in the sched group. */
1202 if (SCHED_GROUP_P (insn))
1203 fixup_sched_groups (insn);
1206 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1207 for every dependency. */
1209 void
1210 sched_analyze (struct deps *deps, rtx head, rtx tail)
1212 rtx insn;
1213 rtx loop_notes = 0;
1215 if (current_sched_info->use_cselib)
1216 cselib_init (true);
1218 /* Before reload, if the previous block ended in a call, show that
1219 we are inside a post-call group, so as to keep the lifetimes of
1220 hard registers correct. */
1221 if (! reload_completed && !LABEL_P (head))
1223 insn = prev_nonnote_insn (head);
1224 if (insn && CALL_P (insn))
1225 deps->in_post_call_group_p = post_call_initial;
1227 for (insn = head;; insn = NEXT_INSN (insn))
1229 rtx link, end_seq, r0, set;
1231 if (NONJUMP_INSN_P (insn) || JUMP_P (insn))
1233 /* Clear out the stale LOG_LINKS from flow. */
1234 free_INSN_LIST_list (&LOG_LINKS (insn));
1236 /* Make each JUMP_INSN a scheduling barrier for memory
1237 references. */
1238 if (JUMP_P (insn))
1240 /* Keep the list a reasonable size. */
1241 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
1242 flush_pending_lists (deps, insn, true, true);
1243 else
1244 deps->last_pending_memory_flush
1245 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
1247 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1248 loop_notes = 0;
1250 else if (CALL_P (insn))
1252 int i;
1254 CANT_MOVE (insn) = 1;
1256 /* Clear out the stale LOG_LINKS from flow. */
1257 free_INSN_LIST_list (&LOG_LINKS (insn));
1259 if (find_reg_note (insn, REG_SETJMP, NULL))
1261 /* This is setjmp. Assume that all registers, not just
1262 hard registers, may be clobbered by this call. */
1263 reg_pending_barrier = MOVE_BARRIER;
1265 else
1267 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1268 /* A call may read and modify global register variables. */
1269 if (global_regs[i])
1271 SET_REGNO_REG_SET (reg_pending_sets, i);
1272 SET_REGNO_REG_SET (reg_pending_uses, i);
1274 /* Other call-clobbered hard regs may be clobbered.
1275 Since we only have a choice between 'might be clobbered'
1276 and 'definitely not clobbered', we must include all
1277 partly call-clobbered registers here. */
1278 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
1279 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
1280 SET_REGNO_REG_SET (reg_pending_clobbers, i);
1281                 /* We don't know which fixed registers the function might use,
1282                    but the stack pointer is certainly among them; be
1283                    conservative and mark all fixed registers as used.  */
1284 else if (fixed_regs[i])
1285 SET_REGNO_REG_SET (reg_pending_uses, i);
1286 /* The frame pointer is normally not used by the function
1287 itself, but by the debugger. */
1288 /* ??? MIPS o32 is an exception. It uses the frame pointer
1289 in the macro expansion of jal but does not represent this
1290 fact in the call_insn rtl. */
1291 else if (i == FRAME_POINTER_REGNUM
1292 || (i == HARD_FRAME_POINTER_REGNUM
1293 && (! reload_completed || frame_pointer_needed)))
1294 SET_REGNO_REG_SET (reg_pending_uses, i);
1297 /* For each insn which shouldn't cross a call, add a dependence
1298 between that insn and this call insn. */
1299 add_dependence_list_and_free (insn, &deps->sched_before_next_call,
1300 REG_DEP_ANTI);
1302 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1303 loop_notes = 0;
1305 /* In the absence of interprocedural alias analysis, we must flush
1306 all pending reads and writes, and start new dependencies starting
1307 from here. But only flush writes for constant calls (which may
1308 be passed a pointer to something we haven't written yet). */
1309 flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));
1311 /* Remember the last function call for limiting lifetimes. */
1312 free_INSN_LIST_list (&deps->last_function_call);
1313 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
1315 /* Before reload, begin a post-call group, so as to keep the
1316 lifetimes of hard registers correct. */
1317 if (! reload_completed)
1318 deps->in_post_call_group_p = post_call;
1321 /* EH_REGION insn notes can not appear until well after we complete
1322 scheduling. */
1323 if (NOTE_P (insn))
1324 gcc_assert (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
1325 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END);
1327 /* See comments on reemit_notes as to why we do this.
1328 ??? Actually, the reemit_notes just say what is done, not why. */
1330 if (NOTE_P (insn)
1331 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
1332 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END))
1334 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1335 GEN_INT (NOTE_LINE_NUMBER (insn)),
1336 loop_notes);
1337 CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
1340 if (current_sched_info->use_cselib)
1341 cselib_process_insn (insn);
1343 /* Now that we have completed handling INSN, check and see if it is
1344 a CLOBBER beginning a libcall block. If it is, record the
1345 end of the libcall sequence.
1347 We want to schedule libcall blocks as a unit before reload. While
1348 this restricts scheduling, it preserves the meaning of a libcall
1349 block.
1351 As a side effect, we may get better code due to decreased register
1352 pressure as well as less chance of a foreign insn appearing in
1353 a libcall block. */
1354 if (!reload_completed
1355 /* Note we may have nested libcall sequences. We only care about
1356 the outermost libcall sequence. */
1357 && deps->libcall_block_tail_insn == 0
1358 /* The sequence must start with a clobber of a register. */
1359 && NONJUMP_INSN_P (insn)
1360 && GET_CODE (PATTERN (insn)) == CLOBBER
1361 && (r0 = XEXP (PATTERN (insn), 0), REG_P (r0))
1362 && REG_P (XEXP (PATTERN (insn), 0))
1363 /* The CLOBBER must also have a REG_LIBCALL note attached. */
1364 && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
1365 && (end_seq = XEXP (link, 0)) != 0
1366 /* The insn referenced by the REG_LIBCALL note must be a
1367 simple nop copy with the same destination as the register
1368 mentioned in the clobber. */
1369 && (set = single_set (end_seq)) != 0
1370 && SET_DEST (set) == r0 && SET_SRC (set) == r0
1371 /* And finally the insn referenced by the REG_LIBCALL must
1372 also contain a REG_EQUAL note and a REG_RETVAL note. */
1373 && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
1374 && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
1375 deps->libcall_block_tail_insn = XEXP (link, 0);
1377 /* If we have reached the end of a libcall block, then close the
1378 block. */
1379 if (deps->libcall_block_tail_insn == insn)
1380 deps->libcall_block_tail_insn = 0;
1382 if (insn == tail)
1384 if (current_sched_info->use_cselib)
1385 cselib_finish ();
1386 return;
1389 gcc_unreachable ();
1393 /* The following function adds a forward dependence (FROM, TO) with
1394    the given DEP_TYPE.  The dependence must not already exist.  */
1396 void
1397 add_forward_dependence (rtx from, rtx to, enum reg_note dep_type)
1399 rtx new_link;
1401 #ifdef ENABLE_CHECKING
1402 /* If add_dependence is working properly there should never
1403 be notes, deleted insns or duplicates in the backward
1404 links. Thus we need not check for them here.
1406 However, if we have enabled checking we might as well go
1407 ahead and verify that add_dependence worked properly. */
1408 gcc_assert (!NOTE_P (from));
1409 gcc_assert (!INSN_DELETED_P (from));
1410 if (forward_dependency_cache)
1411 gcc_assert (!bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
1412 INSN_LUID (to)));
1413 else
1414 gcc_assert (!find_insn_list (to, INSN_DEPEND (from)));
1416   /* Record the new dependence in the cache so that the duplicate
         check above stays accurate for later calls.  */
1417   if (forward_dependency_cache != NULL)
1418     bitmap_set_bit (&forward_dependency_cache[INSN_LUID (from)],
1419                     INSN_LUID (to));
1420 #endif
1422 new_link = alloc_INSN_LIST (to, INSN_DEPEND (from));
1424 PUT_REG_NOTE_KIND (new_link, dep_type);
1426 INSN_DEPEND (from) = new_link;
1427 INSN_DEP_COUNT (to) += 1;
1430 /* Examine insns in the range [ HEAD, TAIL ] and use the backward
1431 dependences from LOG_LINKS to build forward dependences in
1432 INSN_DEPEND. */
1434 void
1435 compute_forward_dependences (rtx head, rtx tail)
1437 rtx insn, link;
1438 rtx next_tail;
1440 next_tail = NEXT_INSN (tail);
1441 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1443 if (! INSN_P (insn))
1444 continue;
1446 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
1447 add_forward_dependence (XEXP (link, 0), insn, REG_NOTE_KIND (link));
1451 /* Initialize the fields of DEPS for region data dependence
1452    analysis.  */
1454 void
1455 init_deps (struct deps *deps)
1457 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
1459 deps->max_reg = max_reg;
1460 deps->reg_last = xcalloc (max_reg, sizeof (struct deps_reg));
1461 INIT_REG_SET (&deps->reg_last_in_use);
1462 INIT_REG_SET (&deps->reg_conditional_sets);
1464 deps->pending_read_insns = 0;
1465 deps->pending_read_mems = 0;
1466 deps->pending_write_insns = 0;
1467 deps->pending_write_mems = 0;
1468 deps->pending_lists_length = 0;
1469 deps->pending_flush_length = 0;
1470 deps->last_pending_memory_flush = 0;
1471 deps->last_function_call = 0;
1472 deps->sched_before_next_call = 0;
1473 deps->in_post_call_group_p = not_post_call;
1474 deps->libcall_block_tail_insn = 0;
1477 /* Free insn lists found in DEPS. */
1479 void
1480 free_deps (struct deps *deps)
1482 unsigned i;
1483 reg_set_iterator rsi;
1485 free_INSN_LIST_list (&deps->pending_read_insns);
1486 free_EXPR_LIST_list (&deps->pending_read_mems);
1487 free_INSN_LIST_list (&deps->pending_write_insns);
1488 free_EXPR_LIST_list (&deps->pending_write_mems);
1489 free_INSN_LIST_list (&deps->last_pending_memory_flush);
1491 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
1492 times. For a testcase with 42000 regs and 8000 small basic blocks,
1493 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
1494 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
1496 struct deps_reg *reg_last = &deps->reg_last[i];
1497 if (reg_last->uses)
1498 free_INSN_LIST_list (&reg_last->uses);
1499 if (reg_last->sets)
1500 free_INSN_LIST_list (&reg_last->sets);
1501 if (reg_last->clobbers)
1502 free_INSN_LIST_list (&reg_last->clobbers);
1504 CLEAR_REG_SET (&deps->reg_last_in_use);
1505 CLEAR_REG_SET (&deps->reg_conditional_sets);
1507 free (deps->reg_last);
1510 /* If it is profitable to use them, initialize caches for tracking
1511    dependency information.  LUID is the number of insns to be scheduled;
1512    it is used in the estimate of profitability.  */
1514 void
1515 init_dependency_caches (int luid)
1517 /* ?!? We could save some memory by computing a per-region luid mapping
1518 which could reduce both the number of vectors in the cache and the size
1519 of each vector. Instead we just avoid the cache entirely unless the
1520 average number of instructions in a basic block is very high. See
1521 the comment before the declaration of true_dependency_cache for
1522 what we consider "very high". */
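/* I.e. the caches are enabled only when the insns being scheduled
   average more than 500 per basic block.  */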
1523 if (luid / n_basic_blocks > 100 * 5)
1525 int i;
1526 true_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
1527 anti_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
1528 output_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
1529 #ifdef ENABLE_CHECKING
1530 forward_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
1531 #endif
1532 for (i = 0; i < luid; i++)
1534 bitmap_initialize (&true_dependency_cache[i], 0);
1535 bitmap_initialize (&anti_dependency_cache[i], 0);
1536 bitmap_initialize (&output_dependency_cache[i], 0);
1537 #ifdef ENABLE_CHECKING
1538 bitmap_initialize (&forward_dependency_cache[i], 0);
1539 #endif
1541 cache_size = luid;
1545 /* Free the caches allocated in init_dependency_caches. */
1547 void
1548 free_dependency_caches (void)
1550 if (true_dependency_cache)
1552 int i;
1554 for (i = 0; i < cache_size; i++)
1556 bitmap_clear (&true_dependency_cache[i]);
1557 bitmap_clear (&anti_dependency_cache[i]);
1558 bitmap_clear (&output_dependency_cache[i]);
1559 #ifdef ENABLE_CHECKING
1560 bitmap_clear (&forward_dependency_cache[i]);
1561 #endif
1563 free (true_dependency_cache);
1564 true_dependency_cache = NULL;
1565 free (anti_dependency_cache);
1566 anti_dependency_cache = NULL;
1567 free (output_dependency_cache);
1568 output_dependency_cache = NULL;
1569 #ifdef ENABLE_CHECKING
1570 free (forward_dependency_cache);
1571 forward_dependency_cache = NULL;
1572 #endif
1576 /* Initialize some global variables needed by the dependency analysis
1577 code. */
1579 void
1580 init_deps_global (void)
1582 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
1583 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
1584 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
1585 reg_pending_barrier = NOT_A_BARRIER;
1588 /* Free everything used by the dependency analysis code. */
1590 void
1591 finish_deps_global (void)
1593 FREE_REG_SET (reg_pending_sets);
1594 FREE_REG_SET (reg_pending_clobbers);
1595 FREE_REG_SET (reg_pending_uses);