1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to the Free
22 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "toplev.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "hard-reg-set.h"
33 #include "basic-block.h"
34 #include "regs.h"
35 #include "function.h"
36 #include "flags.h"
37 #include "insn-config.h"
38 #include "insn-attr.h"
39 #include "except.h"
40 #include "toplev.h"
41 #include "recog.h"
42 #include "sched-int.h"
43 #include "params.h"
44 #include "cselib.h"
45 #include "df.h"
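/* Alias analysis information about pseudo registers, indexed by register
   number: reg_known_value[N] is a value pseudo N is known to hold, and
   reg_known_equiv_p[N] is nonzero when that value comes from a REG_EQUIV
   note.  */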
47 extern char *reg_known_equiv_p;
48 extern rtx *reg_known_value;
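/* Registers that the insn currently being analyzed sets, clobbers or uses.
   sched_analyze_1 and sched_analyze_2 accumulate register numbers here;
   sched_analyze_insn turns them into dependence links and clears the sets
   before the next insn is processed.  */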
50 static regset_head reg_pending_sets_head;
51 static regset_head reg_pending_clobbers_head;
52 static regset_head reg_pending_uses_head;
54 static regset reg_pending_sets;
55 static regset reg_pending_clobbers;
56 static regset reg_pending_uses;
58 /* The following enumeration values tell us what dependencies we
59 should use to implement the barrier. We use true-dependencies for
60 TRUE_BARRIER and anti-dependencies for MOVE_BARRIER. */
61 enum reg_pending_barrier_mode
63 NOT_A_BARRIER = 0,
64 MOVE_BARRIER,
65 TRUE_BARRIER
68 static enum reg_pending_barrier_mode reg_pending_barrier;
70 /* To speed up the test for duplicate dependency links we keep a
71 record of dependencies created by add_dependence when the average
72 number of instructions in a basic block is very large.
74 Studies have shown that there are typically around 5 instructions between
75 branches for typical C code. So we can make a guess that the average
76 basic block is approximately 5 instructions long; we will choose 100X
77 the average size as a very large basic block.
79 Each insn has associated bitmaps for its dependencies. Each bitmap
80 has enough entries to represent a dependency on any other insn in
81 the insn chain.  If the bitmap cache for true dependencies is allocated,
82 then the other two caches are allocated as well.  */
83 static sbitmap *true_dependency_cache;
84 static sbitmap *anti_dependency_cache;
85 static sbitmap *output_dependency_cache;
87 /* To speed up checking the consistency of formed forward insn
88 dependencies we use the following cache.  Another possible solution
89 would be to switch off the check for duplicate insns in forward
90 dependencies.  */
91 #ifdef ENABLE_CHECKING
92 static sbitmap *forward_dependency_cache;
93 #endif
95 static int deps_may_trap_p PARAMS ((rtx));
96 static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
97 static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
98 static void set_sched_group_p PARAMS ((rtx));
100 static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
101 static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
102 static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
103 static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
105 static rtx get_condition PARAMS ((rtx));
106 static int conditions_mutex_p PARAMS ((rtx, rtx));
108 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
110 static int
111 deps_may_trap_p (mem)
112 rtx mem;
114 rtx addr = XEXP (mem, 0);
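/* If the address is a pseudo with a known equivalent value, test that
   value instead; it is what the address may be replaced by.  */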
116 if (REG_P (addr)
117 && REGNO (addr) >= FIRST_PSEUDO_REGISTER
118 && reg_known_value[REGNO (addr)])
119 addr = reg_known_value[REGNO (addr)];
120 return rtx_addr_can_trap_p (addr);
123 /* Return the INSN_LIST containing INSN in LIST, or NULL
124 if LIST does not contain INSN. */
126 rtx
127 find_insn_list (insn, list)
128 rtx insn;
129 rtx list;
131 while (list)
133 if (XEXP (list, 0) == insn)
134 return list;
135 list = XEXP (list, 1);
137 return 0;
140 /* Find the condition under which INSN is executed. */
142 static rtx
143 get_condition (insn)
144 rtx insn;
146 rtx pat = PATTERN (insn);
147 rtx cond;
149 if (pat == 0)
150 return 0;
151 if (GET_CODE (pat) == COND_EXEC)
152 return COND_EXEC_TEST (pat);
153 if (GET_CODE (insn) != JUMP_INSN)
154 return 0;
155 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
156 return 0;
157 if (GET_CODE (SET_SRC (pat)) != IF_THEN_ELSE)
158 return 0;
159 pat = SET_SRC (pat);
160 cond = XEXP (pat, 0);
161 if (GET_CODE (XEXP (pat, 1)) == LABEL_REF
162 && XEXP (pat, 2) == pc_rtx)
163 return cond;
164 else if (GET_CODE (XEXP (pat, 2)) == LABEL_REF
165 && XEXP (pat, 1) == pc_rtx)
166 return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
167 XEXP (cond, 0), XEXP (cond, 1));
168 else
169 return 0;
172 /* Return nonzero if conditions COND1 and COND2 can never both be true.  */
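/* For example, (eq (reg 1) (const_int 0)) and (ne (reg 1) (const_int 0))
   can never both hold, so insns predicated on them need no dependence
   between each other as long as neither modifies the condition.  */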
174 static int
175 conditions_mutex_p (cond1, cond2)
176 rtx cond1, cond2;
178 if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
179 && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
180 && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
181 && XEXP (cond1, 0) == XEXP (cond2, 0)
182 && XEXP (cond1, 1) == XEXP (cond2, 1))
183 return 1;
184 return 0;
187 /* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
188 LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the
189 type of dependence that this link represents. The function returns
190 nonzero if a new entry has been added to insn's LOG_LINKS.  */
192 int
193 add_dependence (insn, elem, dep_type)
194 rtx insn;
195 rtx elem;
196 enum reg_note dep_type;
198 rtx link;
199 int present_p;
200 rtx cond1, cond2;
202 /* Don't depend an insn on itself. */
203 if (insn == elem)
204 return 0;
206 /* We can get a dependency on deleted insns due to optimizations in
207 register allocation and reloading or due to splitting.  Any
208 such dependency is useless and can be ignored. */
209 if (GET_CODE (elem) == NOTE)
210 return 0;
212 /* flow.c doesn't handle conditional lifetimes entirely correctly;
213 calls mess up the conditional lifetimes. */
214 /* ??? add_dependence is the wrong place to be eliding dependencies,
215 as that forgets that the condition expressions themselves may
216 be dependent. */
217 if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
219 cond1 = get_condition (insn);
220 cond2 = get_condition (elem);
221 if (cond1 && cond2
222 && conditions_mutex_p (cond1, cond2)
223 /* Make sure first instruction doesn't affect condition of second
224 instruction if switched. */
225 && !modified_in_p (cond1, elem)
226 /* Make sure second instruction doesn't affect condition of first
227 instruction if switched. */
228 && !modified_in_p (cond2, insn))
229 return 0;
232 present_p = 1;
233 #ifdef INSN_SCHEDULING
234 /* ??? No good way to tell from here whether we're doing interblock
235 scheduling. Possibly add another callback. */
236 #if 0
237 /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
238 No need for interblock dependences with calls, since
239 calls are not moved between blocks. Note: the edge where
240 elem is a CALL is still required. */
241 if (GET_CODE (insn) == CALL_INSN
242 && (INSN_BB (elem) != INSN_BB (insn)))
243 return 0;
244 #endif
246 /* If we already have a dependency for ELEM, then we do not need to
247 do anything. Avoiding the list walk below can cut compile times
248 dramatically for some code. */
249 if (true_dependency_cache != NULL)
251 enum reg_note present_dep_type = 0;
253 if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
254 abort ();
255 if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
256 /* Do nothing (present_dep_type is already 0).  */
258 else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
259 INSN_LUID (elem)))
260 present_dep_type = REG_DEP_ANTI;
261 else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
262 INSN_LUID (elem)))
263 present_dep_type = REG_DEP_OUTPUT;
264 else
265 present_p = 0;
266 if (present_p && (int) dep_type >= (int) present_dep_type)
267 return 0;
269 #endif
271 /* Check that we don't already have this dependence. */
272 if (present_p)
273 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
274 if (XEXP (link, 0) == elem)
276 #ifdef INSN_SCHEDULING
277 /* Clear corresponding cache entry because type of the link
278 may be changed. */
279 if (true_dependency_cache != NULL)
281 if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
282 RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
283 INSN_LUID (elem));
284 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
285 && output_dependency_cache)
286 RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
287 INSN_LUID (elem));
288 else
289 abort ();
291 #endif
293 /* If this is a more restrictive type of dependence than the existing
294 one, then change the existing dependence to this type. */
295 if ((int) dep_type < (int) REG_NOTE_KIND (link))
296 PUT_REG_NOTE_KIND (link, dep_type);
298 #ifdef INSN_SCHEDULING
299 /* If we are adding a dependency to INSN's LOG_LINKs, then
300 note that in the bitmap caches of dependency information. */
301 if (true_dependency_cache != NULL)
303 if ((int) REG_NOTE_KIND (link) == 0)
304 SET_BIT (true_dependency_cache[INSN_LUID (insn)],
305 INSN_LUID (elem));
306 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
307 SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
308 INSN_LUID (elem));
309 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
310 SET_BIT (output_dependency_cache[INSN_LUID (insn)],
311 INSN_LUID (elem));
313 #endif
314 return 0;
316 /* Might want to check one level of transitivity to save conses. */
318 link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
319 LOG_LINKS (insn) = link;
321 /* Insn dependency, not data dependency. */
322 PUT_REG_NOTE_KIND (link, dep_type);
324 #ifdef INSN_SCHEDULING
325 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
326 in the bitmap caches of dependency information. */
327 if (true_dependency_cache != NULL)
329 if ((int) dep_type == 0)
330 SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
331 else if (dep_type == REG_DEP_ANTI)
332 SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
333 else if (dep_type == REG_DEP_OUTPUT)
334 SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
336 #endif
337 return 1;
340 /* A convenience wrapper to operate on an entire list. */
342 static void
343 add_dependence_list (insn, list, dep_type)
344 rtx insn, list;
345 enum reg_note dep_type;
347 for (; list; list = XEXP (list, 1))
348 add_dependence (insn, XEXP (list, 0), dep_type);
351 /* Similar, but free *LISTP at the same time. */
353 static void
354 add_dependence_list_and_free (insn, listp, dep_type)
355 rtx insn;
356 rtx *listp;
357 enum reg_note dep_type;
359 rtx list, next;
360 for (list = *listp, *listp = NULL; list ; list = next)
362 next = XEXP (list, 1);
363 add_dependence (insn, XEXP (list, 0), dep_type);
364 free_INSN_LIST_node (list);
368 /* Set SCHED_GROUP_P and take care of the rest of the bookkeeping that
369 goes along with that. */
371 static void
372 set_sched_group_p (insn)
373 rtx insn;
375 rtx prev;
377 SCHED_GROUP_P (insn) = 1;
379 prev = prev_nonnote_insn (insn);
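/* The anti-dependence on the previous insn keeps this insn immediately
   after it; the scheduler moves SCHED_GROUP_P insns together with their
   predecessor.  */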
380 add_dependence (insn, prev, REG_DEP_ANTI);
383 /* Process an insn's memory dependencies. There are four kinds of
384 dependencies:
386 (0) read dependence: read follows read
387 (1) true dependence: read follows write
388 (2) anti dependence: write follows read
389 (3) output dependence: write follows write
391 We are careful to build only dependencies which actually exist, and
392 use transitivity to avoid building too many links. */
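/* For example, in the sequence
     (1) store [addr]
     (2) load  [addr]
     (3) store [addr]
   insn (2) gets a true dependence on (1), while insn (3) gets an anti
   dependence on (2) and an output dependence on (1).  */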
394 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
395 The MEM is a memory reference contained within INSN, which we are saving
396 so that we can do memory aliasing on it. */
398 void
399 add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
400 struct deps *deps;
401 rtx *insn_list, *mem_list, insn, mem;
403 rtx link;
405 link = alloc_INSN_LIST (insn, *insn_list);
406 *insn_list = link;
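/* When cselib is in use, record the address in terms of cselib values so
   that syntactically different but equivalent addresses compare equal in
   later alias checks; work on a copy so the insn itself is unchanged.  */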
408 if (current_sched_info->use_cselib)
410 mem = shallow_copy_rtx (mem);
411 XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
413 link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
414 *mem_list = link;
416 deps->pending_lists_length++;
419 /* Make a dependency between every memory reference on the pending lists
420 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
421 dependencies for a read operation, similarly with FOR_WRITE. */
423 static void
424 flush_pending_lists (deps, insn, for_read, for_write)
425 struct deps *deps;
426 rtx insn;
427 int for_read, for_write;
429 if (for_write)
431 add_dependence_list_and_free (insn, &deps->pending_read_insns,
432 REG_DEP_ANTI);
433 free_EXPR_LIST_list (&deps->pending_read_mems);
436 add_dependence_list_and_free (insn, &deps->pending_write_insns,
437 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
438 free_EXPR_LIST_list (&deps->pending_write_mems);
439 deps->pending_lists_length = 0;
441 add_dependence_list_and_free (insn, &deps->last_pending_memory_flush,
442 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
443 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
444 deps->pending_flush_length = 1;
447 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
448 rtx, X, creating all dependencies generated by the write to the
449 destination of X, and reads of everything mentioned. */
451 static void
452 sched_analyze_1 (deps, x, insn)
453 struct deps *deps;
454 rtx x;
455 rtx insn;
457 int regno;
458 rtx dest = XEXP (x, 0);
459 enum rtx_code code = GET_CODE (x);
461 if (dest == 0)
462 return;
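/* A PARALLEL destination, used e.g. for a value returned in several
   registers, writes each of its pieces; analyze each piece as a separate
   store.  */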
464 if (GET_CODE (dest) == PARALLEL)
466 int i;
468 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
469 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
470 sched_analyze_1 (deps,
471 gen_rtx_CLOBBER (VOIDmode,
472 XEXP (XVECEXP (dest, 0, i), 0)),
473 insn);
475 if (GET_CODE (x) == SET)
476 sched_analyze_2 (deps, SET_SRC (x), insn);
477 return;
480 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
481 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
483 if (GET_CODE (dest) == STRICT_LOW_PART
484 || GET_CODE (dest) == ZERO_EXTRACT
485 || GET_CODE (dest) == SIGN_EXTRACT
486 || read_modify_subreg_p (dest))
488 /* These both read and modify the result. We must handle
489 them as writes to get proper dependencies for following
490 instructions. We must handle them as reads to get proper
491 dependencies from this to previous instructions.
492 Thus we need to call sched_analyze_2. */
494 sched_analyze_2 (deps, XEXP (dest, 0), insn);
496 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
498 /* The second and third arguments are values read by this insn. */
499 sched_analyze_2 (deps, XEXP (dest, 1), insn);
500 sched_analyze_2 (deps, XEXP (dest, 2), insn);
502 dest = XEXP (dest, 0);
505 if (GET_CODE (dest) == REG)
507 regno = REGNO (dest);
509 /* A hard reg in a wide mode may really be multiple registers.
510 If so, mark all of them just like the first. */
511 if (regno < FIRST_PSEUDO_REGISTER)
513 int i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
514 if (code == SET)
516 while (--i >= 0)
517 SET_REGNO_REG_SET (reg_pending_sets, regno + i);
519 else
521 while (--i >= 0)
522 SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
525 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
526 it does not reload. Ignore these as they have served their
527 purpose already. */
528 else if (regno >= deps->max_reg)
530 if (GET_CODE (PATTERN (insn)) != USE
531 && GET_CODE (PATTERN (insn)) != CLOBBER)
532 abort ();
534 else
536 if (code == SET)
537 SET_REGNO_REG_SET (reg_pending_sets, regno);
538 else
539 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
541 /* Pseudos that are REG_EQUIV to something may be replaced
542 by that during reloading. We need only add dependencies for
543 the address in the REG_EQUIV note. */
544 if (!reload_completed
545 && reg_known_equiv_p[regno]
546 && GET_CODE (reg_known_value[regno]) == MEM)
547 sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
549 /* Don't let it cross a call after scheduling if it doesn't
550 already cross one. */
551 if (REG_N_CALLS_CROSSED (regno) == 0)
552 add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI);
555 else if (GET_CODE (dest) == MEM)
557 /* Writing memory. */
558 rtx t = dest;
560 if (current_sched_info->use_cselib)
562 t = shallow_copy_rtx (dest);
563 cselib_lookup (XEXP (t, 0), Pmode, 1);
564 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
567 if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
569 /* Flush all pending reads and writes to prevent the pending lists
570 from getting any larger. Insn scheduling runs too slowly when
571 these lists get long. When compiling GCC with itself,
572 this flush occurs 8 times for sparc, and 10 times for m88k using
573 the default value of 32. */
574 flush_pending_lists (deps, insn, false, true);
576 else
578 rtx pending, pending_mem;
580 pending = deps->pending_read_insns;
581 pending_mem = deps->pending_read_mems;
582 while (pending)
584 if (anti_dependence (XEXP (pending_mem, 0), t))
585 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
587 pending = XEXP (pending, 1);
588 pending_mem = XEXP (pending_mem, 1);
591 pending = deps->pending_write_insns;
592 pending_mem = deps->pending_write_mems;
593 while (pending)
595 if (output_dependence (XEXP (pending_mem, 0), t))
596 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
598 pending = XEXP (pending, 1);
599 pending_mem = XEXP (pending_mem, 1);
602 add_dependence_list (insn, deps->last_pending_memory_flush,
603 REG_DEP_ANTI);
605 add_insn_mem_dependence (deps, &deps->pending_write_insns,
606 &deps->pending_write_mems, insn, dest);
608 sched_analyze_2 (deps, XEXP (dest, 0), insn);
611 /* Analyze reads. */
612 if (GET_CODE (x) == SET)
613 sched_analyze_2 (deps, SET_SRC (x), insn);
616 /* Analyze the uses of memory and registers in rtx X in INSN. */
618 static void
619 sched_analyze_2 (deps, x, insn)
620 struct deps *deps;
621 rtx x;
622 rtx insn;
624 int i;
625 int j;
626 enum rtx_code code;
627 const char *fmt;
629 if (x == 0)
630 return;
632 code = GET_CODE (x);
634 switch (code)
636 case CONST_INT:
637 case CONST_DOUBLE:
638 case CONST_VECTOR:
639 case SYMBOL_REF:
640 case CONST:
641 case LABEL_REF:
642 /* Ignore constants. Note that we must handle CONST_DOUBLE here
643 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
644 this does not mean that this insn is using cc0. */
645 return;
647 #ifdef HAVE_cc0
648 case CC0:
649 /* User of CC0 depends on immediately preceding insn. */
650 set_sched_group_p (insn);
651 return;
652 #endif
654 case REG:
656 int regno = REGNO (x);
657 if (regno < FIRST_PSEUDO_REGISTER)
659 int i = HARD_REGNO_NREGS (regno, GET_MODE (x));
660 while (--i >= 0)
661 SET_REGNO_REG_SET (reg_pending_uses, regno + i);
663 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
664 it does not reload. Ignore these as they have served their
665 purpose already. */
666 else if (regno >= deps->max_reg)
668 if (GET_CODE (PATTERN (insn)) != USE
669 && GET_CODE (PATTERN (insn)) != CLOBBER)
670 abort ();
672 else
674 SET_REGNO_REG_SET (reg_pending_uses, regno);
676 /* Pseudos that are REG_EQUIV to something may be replaced
677 by that during reloading. We need only add dependencies for
678 the address in the REG_EQUIV note. */
679 if (!reload_completed
680 && reg_known_equiv_p[regno]
681 && GET_CODE (reg_known_value[regno]) == MEM)
682 sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
684 /* If the register does not already cross any calls, then add this
685 insn to the sched_before_next_call list so that it will still
686 not cross calls after scheduling. */
687 if (REG_N_CALLS_CROSSED (regno) == 0)
688 deps->sched_before_next_call
689 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
691 return;
694 case MEM:
696 /* Reading memory. */
697 rtx u;
698 rtx pending, pending_mem;
699 rtx t = x;
701 if (current_sched_info->use_cselib)
703 t = shallow_copy_rtx (t);
704 cselib_lookup (XEXP (t, 0), Pmode, 1);
705 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
707 pending = deps->pending_read_insns;
708 pending_mem = deps->pending_read_mems;
709 while (pending)
711 if (read_dependence (XEXP (pending_mem, 0), t))
712 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
714 pending = XEXP (pending, 1);
715 pending_mem = XEXP (pending_mem, 1);
718 pending = deps->pending_write_insns;
719 pending_mem = deps->pending_write_mems;
720 while (pending)
722 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
723 t, rtx_varies_p))
724 add_dependence (insn, XEXP (pending, 0), 0);
726 pending = XEXP (pending, 1);
727 pending_mem = XEXP (pending_mem, 1);
730 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
731 if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
732 || deps_may_trap_p (x))
733 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
735 /* Always add these dependencies to pending_reads, since
736 this insn may be followed by a write. */
737 add_insn_mem_dependence (deps, &deps->pending_read_insns,
738 &deps->pending_read_mems, insn, x);
740 /* Take advantage of tail recursion here. */
741 sched_analyze_2 (deps, XEXP (x, 0), insn);
742 return;
745 /* Force pending stores to memory in case a trap handler needs them. */
746 case TRAP_IF:
747 flush_pending_lists (deps, insn, true, false);
748 break;
750 case ASM_OPERANDS:
751 case ASM_INPUT:
752 case UNSPEC_VOLATILE:
754 /* Traditional and volatile asm instructions must be considered to use
755 and clobber all hard registers, all pseudo-registers and all of
756 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
758 Consider for instance a volatile asm that changes the fpu rounding
759 mode. An insn should not be moved across this even if it only uses
760 pseudo-regs because it might give an incorrectly rounded result. */
761 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
762 reg_pending_barrier = TRUE_BARRIER;
764 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
765 We cannot just fall through here since then we would be confused
766 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
767 a traditional asm as it does in its normal usage.  */
769 if (code == ASM_OPERANDS)
771 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
772 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
773 return;
775 break;
778 case PRE_DEC:
779 case POST_DEC:
780 case PRE_INC:
781 case POST_INC:
782 /* These both read and modify the result. We must handle them as writes
783 to get proper dependencies for following instructions. We must handle
784 them as reads to get proper dependencies from this to previous
785 instructions. Thus we need to pass them to both sched_analyze_1
786 and sched_analyze_2. We must call sched_analyze_2 first in order
787 to get the proper antecedent for the read. */
788 sched_analyze_2 (deps, XEXP (x, 0), insn);
789 sched_analyze_1 (deps, x, insn);
790 return;
792 case POST_MODIFY:
793 case PRE_MODIFY:
794 /* op0 = op0 + op1 */
795 sched_analyze_2 (deps, XEXP (x, 0), insn);
796 sched_analyze_2 (deps, XEXP (x, 1), insn);
797 sched_analyze_1 (deps, x, insn);
798 return;
800 default:
801 break;
804 /* Other cases: walk the insn. */
805 fmt = GET_RTX_FORMAT (code);
806 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
808 if (fmt[i] == 'e')
809 sched_analyze_2 (deps, XEXP (x, i), insn);
810 else if (fmt[i] == 'E')
811 for (j = 0; j < XVECLEN (x, i); j++)
812 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
816 /* Analyze an INSN with pattern X to find all dependencies. */
818 static void
819 sched_analyze_insn (deps, x, insn, loop_notes)
820 struct deps *deps;
821 rtx x, insn;
822 rtx loop_notes;
824 RTX_CODE code = GET_CODE (x);
825 rtx link;
826 int i;
828 if (code == COND_EXEC)
830 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
832 /* ??? Should be recording conditions so we reduce the number of
833 false dependencies. */
834 x = COND_EXEC_CODE (x);
835 code = GET_CODE (x);
837 if (code == SET || code == CLOBBER)
839 sched_analyze_1 (deps, x, insn);
841 /* Bare clobber insns are used for letting life analysis, reg-stack
842 and others know that a value is dead. Depend on the last call
843 instruction so that reg-stack won't get confused. */
844 if (code == CLOBBER)
845 add_dependence_list (insn, deps->last_function_call, REG_DEP_OUTPUT);
847 else if (code == PARALLEL)
849 int i;
850 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
852 rtx sub = XVECEXP (x, 0, i);
853 code = GET_CODE (sub);
855 if (code == COND_EXEC)
857 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
858 sub = COND_EXEC_CODE (sub);
859 code = GET_CODE (sub);
861 if (code == SET || code == CLOBBER)
862 sched_analyze_1 (deps, sub, insn);
863 else
864 sched_analyze_2 (deps, sub, insn);
867 else
868 sched_analyze_2 (deps, x, insn);
870 /* Mark registers CLOBBERED or used by called function. */
871 if (GET_CODE (insn) == CALL_INSN)
873 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
875 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
876 sched_analyze_1 (deps, XEXP (link, 0), insn);
877 else
878 sched_analyze_2 (deps, XEXP (link, 0), insn);
880 if (find_reg_note (insn, REG_SETJMP, NULL))
881 reg_pending_barrier = MOVE_BARRIER;
884 if (GET_CODE (insn) == JUMP_INSN)
886 rtx next;
887 next = next_nonnote_insn (insn);
888 if (next && GET_CODE (next) == BARRIER)
889 reg_pending_barrier = TRUE_BARRIER;
890 else
892 rtx pending, pending_mem;
893 regset_head tmp;
894 INIT_REG_SET (&tmp);
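/* Ask the scheduler front end which registers this jump depends on;
   anti-dependences on their last setters and clobberers are added below
   so the jump cannot be scheduled before them.  */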
896 (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
897 /* Make latency of jump equal to 0 by using anti-dependence. */
898 EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
900 struct deps_reg *reg_last = &deps->reg_last[i];
901 add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI);
902 add_dependence_list (insn, reg_last->clobbers, REG_DEP_ANTI);
903 reg_last->uses_length++;
904 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
906 CLEAR_REG_SET (&tmp);
908 /* All memory writes and volatile reads must happen before the
909 jump. Non-volatile reads must happen before the jump iff
910 the result is needed by the register use mask computed above.  */
912 pending = deps->pending_write_insns;
913 pending_mem = deps->pending_write_mems;
914 while (pending)
916 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
917 pending = XEXP (pending, 1);
918 pending_mem = XEXP (pending_mem, 1);
921 pending = deps->pending_read_insns;
922 pending_mem = deps->pending_read_mems;
923 while (pending)
925 if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
926 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
927 pending = XEXP (pending, 1);
928 pending_mem = XEXP (pending_mem, 1);
931 add_dependence_list (insn, deps->last_pending_memory_flush,
932 REG_DEP_ANTI);
936 /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
937 block, then we must be sure that no instructions are scheduled across it.
938 Otherwise, the reg_n_refs info (which depends on loop_depth) would
939 become incorrect. */
940 if (loop_notes)
942 rtx link;
944 /* Update loop_notes with any notes from this insn. Also determine
945 if any of the notes on the list correspond to instruction scheduling
946 barriers (loop, eh & setjmp notes, but not range notes). */
947 link = loop_notes;
948 while (XEXP (link, 1))
950 if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
951 || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
952 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
953 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
954 reg_pending_barrier = MOVE_BARRIER;
956 link = XEXP (link, 1);
958 XEXP (link, 1) = REG_NOTES (insn);
959 REG_NOTES (insn) = loop_notes;
962 /* If this instruction can throw an exception, then moving it changes
963 where block boundaries fall. This is mighty confusing elsewhere.
964 Therefore, prevent such an instruction from being moved. */
965 if (can_throw_internal (insn))
966 reg_pending_barrier = MOVE_BARRIER;
968 /* Add dependencies if a scheduling barrier was found. */
969 if (reg_pending_barrier)
971 /* In the case of a barrier, most of the added dependencies are not
972 real, so we use anti-dependences here.  */
973 if (GET_CODE (PATTERN (insn)) == COND_EXEC)
975 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
977 struct deps_reg *reg_last = &deps->reg_last[i];
978 add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
979 add_dependence_list
980 (insn, reg_last->sets,
981 reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
982 add_dependence_list
983 (insn, reg_last->clobbers,
984 reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
987 else
989 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
991 struct deps_reg *reg_last = &deps->reg_last[i];
992 add_dependence_list_and_free (insn, &reg_last->uses,
993 REG_DEP_ANTI);
994 add_dependence_list_and_free
995 (insn, &reg_last->sets,
996 reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
997 add_dependence_list_and_free
998 (insn, &reg_last->clobbers,
999 reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
1000 reg_last->uses_length = 0;
1001 reg_last->clobbers_length = 0;
1005 for (i = 0; i < deps->max_reg; i++)
1007 struct deps_reg *reg_last = &deps->reg_last[i];
1008 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1009 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1012 flush_pending_lists (deps, insn, true, true);
1013 reg_pending_barrier = NOT_A_BARRIER;
1015 else
1017 /* If the current insn is conditional, we can't free any
1018 of the lists. */
1019 if (GET_CODE (PATTERN (insn)) == COND_EXEC)
1021 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
1023 struct deps_reg *reg_last = &deps->reg_last[i];
1024 add_dependence_list (insn, reg_last->sets, 0);
1025 add_dependence_list (insn, reg_last->clobbers, 0);
1026 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1027 reg_last->uses_length++;
1029 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
1031 struct deps_reg *reg_last = &deps->reg_last[i];
1032 add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
1033 add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
1034 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1035 reg_last->clobbers_length++;
1037 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
1039 struct deps_reg *reg_last = &deps->reg_last[i];
1040 add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
1041 add_dependence_list (insn, reg_last->clobbers, REG_DEP_OUTPUT);
1042 add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
1043 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1046 else
1048 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
1050 struct deps_reg *reg_last = &deps->reg_last[i];
1051 add_dependence_list (insn, reg_last->sets, 0);
1052 add_dependence_list (insn, reg_last->clobbers, 0);
1053 reg_last->uses_length++;
1054 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1056 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
1058 struct deps_reg *reg_last = &deps->reg_last[i];
1059 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
1060 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
1062 add_dependence_list_and_free (insn, &reg_last->sets,
1063 REG_DEP_OUTPUT);
1064 add_dependence_list_and_free (insn, &reg_last->uses,
1065 REG_DEP_ANTI);
1066 add_dependence_list_and_free (insn, &reg_last->clobbers,
1067 REG_DEP_OUTPUT);
1068 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1069 reg_last->clobbers_length = 0;
1070 reg_last->uses_length = 0;
1072 else
1074 add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
1075 add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
1077 reg_last->clobbers_length++;
1078 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1080 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
1082 struct deps_reg *reg_last = &deps->reg_last[i];
1083 add_dependence_list_and_free (insn, &reg_last->sets,
1084 REG_DEP_OUTPUT);
1085 add_dependence_list_and_free (insn, &reg_last->clobbers,
1086 REG_DEP_OUTPUT);
1087 add_dependence_list_and_free (insn, &reg_last->uses,
1088 REG_DEP_ANTI);
1089 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1090 reg_last->uses_length = 0;
1091 reg_last->clobbers_length = 0;
1095 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
1096 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
1097 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
1099 CLEAR_REG_SET (reg_pending_uses);
1100 CLEAR_REG_SET (reg_pending_clobbers);
1101 CLEAR_REG_SET (reg_pending_sets);
1103 /* If we are currently in a libcall scheduling group, then mark the
1104 current insn as being in a scheduling group and note that it cannot
1105 be moved into a different basic block.  */
1107 if (deps->libcall_block_tail_insn)
1109 set_sched_group_p (insn);
1110 CANT_MOVE (insn) = 1;
1113 /* If a post-call group is still open, see if it should remain so.
1114 This insn must be a simple move of a hard reg to a pseudo or
1115 vice-versa.
1117 We must avoid moving these insns for correctness on
1118 SMALL_REGISTER_CLASS machines, and for special registers like
1119 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
1120 hard regs for all targets. */
1122 if (deps->in_post_call_group_p)
1124 rtx tmp, set = single_set (insn);
1125 int src_regno, dest_regno;
1127 if (set == NULL)
1128 goto end_call_group;
1130 tmp = SET_DEST (set);
1131 if (GET_CODE (tmp) == SUBREG)
1132 tmp = SUBREG_REG (tmp);
1133 if (GET_CODE (tmp) == REG)
1134 dest_regno = REGNO (tmp);
1135 else
1136 goto end_call_group;
1138 tmp = SET_SRC (set);
1139 if (GET_CODE (tmp) == SUBREG)
1140 tmp = SUBREG_REG (tmp);
1141 if (GET_CODE (tmp) == REG)
1142 src_regno = REGNO (tmp);
1143 else
1144 goto end_call_group;
1146 if (src_regno < FIRST_PSEUDO_REGISTER
1147 || dest_regno < FIRST_PSEUDO_REGISTER)
1149 set_sched_group_p (insn);
1150 CANT_MOVE (insn) = 1;
1152 else
1154 end_call_group:
1155 deps->in_post_call_group_p = false;
1160 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1161 for every dependency. */
1163 void
1164 sched_analyze (deps, head, tail)
1165 struct deps *deps;
1166 rtx head, tail;
1168 rtx insn;
1169 rtx loop_notes = 0;
1171 if (current_sched_info->use_cselib)
1172 cselib_init ();
1174 for (insn = head;; insn = NEXT_INSN (insn))
1176 rtx link, end_seq, r0, set;
1178 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
1180 /* Clear out the stale LOG_LINKS from flow. */
1181 free_INSN_LIST_list (&LOG_LINKS (insn));
1183 /* Make each JUMP_INSN a scheduling barrier for memory
1184 references. */
1185 if (GET_CODE (insn) == JUMP_INSN)
1187 /* Keep the list a reasonable size. */
1188 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
1189 flush_pending_lists (deps, insn, true, true);
1190 else
1191 deps->last_pending_memory_flush
1192 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
1194 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1195 loop_notes = 0;
1197 else if (GET_CODE (insn) == CALL_INSN)
1199 int i;
1201 CANT_MOVE (insn) = 1;
1203 /* Clear out the stale LOG_LINKS from flow. */
1204 free_INSN_LIST_list (&LOG_LINKS (insn));
1206 if (find_reg_note (insn, REG_SETJMP, NULL))
1208 /* This is setjmp. Assume that all registers, not just
1209 hard registers, may be clobbered by this call. */
1210 reg_pending_barrier = MOVE_BARRIER;
1212 else
1214 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1215 /* A call may read and modify global register variables. */
1216 if (global_regs[i])
1218 SET_REGNO_REG_SET (reg_pending_sets, i);
1219 SET_REGNO_REG_SET (reg_pending_uses, i);
1221 /* Other call-clobbered hard regs may be clobbered.
1222 Since we only have a choice between 'might be clobbered'
1223 and 'definitely not clobbered', we must include all
1224 partly call-clobbered registers here. */
1225 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
1226 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
1227 SET_REGNO_REG_SET (reg_pending_clobbers, i);
1228 /* We don't know exactly which fixed registers a called function
1229 uses, but the stack pointer is certainly among them; be
1230 conservative and mark every fixed register as used.  */
1231 else if (fixed_regs[i])
1232 SET_REGNO_REG_SET (reg_pending_uses, i);
1233 /* The frame pointer is normally not used by the function
1234 itself, but by the debugger. */
1235 /* ??? MIPS o32 is an exception. It uses the frame pointer
1236 in the macro expansion of jal but does not represent this
1237 fact in the call_insn rtl. */
1238 else if (i == FRAME_POINTER_REGNUM
1239 || (i == HARD_FRAME_POINTER_REGNUM
1240 && (! reload_completed || frame_pointer_needed)))
1241 SET_REGNO_REG_SET (reg_pending_uses, i);
1244 /* For each insn which shouldn't cross a call, add a dependence
1245 between that insn and this call insn. */
1246 add_dependence_list_and_free (insn, &deps->sched_before_next_call,
1247 REG_DEP_ANTI);
1249 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1250 loop_notes = 0;
1252 /* In the absence of interprocedural alias analysis, we must flush
1253 all pending reads and writes, and start new dependencies starting
1254 from here. But only flush writes for constant calls (which may
1255 be passed a pointer to something we haven't written yet). */
1256 flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));
1258 /* Remember the last function call for limiting lifetimes. */
1259 free_INSN_LIST_list (&deps->last_function_call);
1260 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
1262 /* Before reload, begin a post-call group, so as to keep the
1263 lifetimes of hard registers correct. */
1264 if (! reload_completed)
1265 deps->in_post_call_group_p = true;
1268 /* See comments on reemit_notes as to why we do this.
1269 ??? Actually, the reemit_notes just say what is done, not why. */
1271 if (GET_CODE (insn) == NOTE
1272 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
1273 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
1274 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1275 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
1277 rtx rtx_region;
1279 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1280 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
1281 rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
1282 else
1283 rtx_region = GEN_INT (0);
1285 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1286 rtx_region,
1287 loop_notes);
1288 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1289 GEN_INT (NOTE_LINE_NUMBER (insn)),
1290 loop_notes);
1291 CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
1294 if (current_sched_info->use_cselib)
1295 cselib_process_insn (insn);
1297 /* Now that we have completed handling INSN, check and see if it is
1298 a CLOBBER beginning a libcall block. If it is, record the
1299 end of the libcall sequence.
1301 We want to schedule libcall blocks as a unit before reload. While
1302 this restricts scheduling, it preserves the meaning of a libcall
1303 block.
1305 As a side effect, we may get better code due to decreased register
1306 pressure as well as less chance of a foreign insn appearing in
1307 a libcall block. */
1308 if (!reload_completed
1309 /* Note we may have nested libcall sequences. We only care about
1310 the outermost libcall sequence. */
1311 && deps->libcall_block_tail_insn == 0
1312 /* The sequence must start with a clobber of a register. */
1313 && GET_CODE (insn) == INSN
1314 && GET_CODE (PATTERN (insn)) == CLOBBER
1315 && (r0 = XEXP (PATTERN (insn), 0), GET_CODE (r0) == REG)
1316 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG
1317 /* The CLOBBER must also have a REG_LIBCALL note attached. */
1318 && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
1319 && (end_seq = XEXP (link, 0)) != 0
1320 /* The insn referenced by the REG_LIBCALL note must be a
1321 simple nop copy with the same destination as the register
1322 mentioned in the clobber. */
1323 && (set = single_set (end_seq)) != 0
1324 && SET_DEST (set) == r0 && SET_SRC (set) == r0
1325 /* And finally the insn referenced by the REG_LIBCALL must
1326 also contain a REG_EQUAL note and a REG_RETVAL note. */
1327 && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
1328 && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
1329 deps->libcall_block_tail_insn = XEXP (link, 0);
1331 /* If we have reached the end of a libcall block, then close the
1332 block. */
1333 if (deps->libcall_block_tail_insn == insn)
1334 deps->libcall_block_tail_insn = 0;
1336 if (insn == tail)
1338 if (current_sched_info->use_cselib)
1339 cselib_finish ();
1340 return;
1343 abort ();
1347 /* The following function adds a forward dependence (FROM, TO) with the
1348 given DEP_TYPE.  The forward dependence must not already exist.  */
1350 void
1351 add_forward_dependence (from, to, dep_type)
1352 rtx from;
1353 rtx to;
1354 enum reg_note dep_type;
1356 rtx new_link;
1358 #ifdef ENABLE_CHECKING
1359 /* If add_dependence is working properly there should never
1360 be notes, deleted insns or duplicates in the backward
1361 links. Thus we need not check for them here.
1363 However, if we have enabled checking we might as well go
1364 ahead and verify that add_dependence worked properly. */
1365 if (GET_CODE (from) == NOTE
1366 || INSN_DELETED_P (from)
1367 || (forward_dependency_cache != NULL
1368 && TEST_BIT (forward_dependency_cache[INSN_LUID (from)],
1369 INSN_LUID (to)))
1370 || (forward_dependency_cache == NULL
1371 && find_insn_list (to, INSN_DEPEND (from))))
1372 abort ();
1373 if (forward_dependency_cache != NULL)
1374 SET_BIT (forward_dependency_cache[INSN_LUID (from)],
1375 INSN_LUID (to));
1376 #endif
1378 new_link = alloc_INSN_LIST (to, INSN_DEPEND (from));
1380 PUT_REG_NOTE_KIND (new_link, dep_type);
1382 INSN_DEPEND (from) = new_link;
1383 INSN_DEP_COUNT (to) += 1;
1386 /* Examine insns in the range [ HEAD, TAIL ] and use the backward
1387 dependences from LOG_LINKS to build forward dependences in
1388 INSN_DEPEND. */
1390 void
1391 compute_forward_dependences (head, tail)
1392 rtx head, tail;
1394 rtx insn, link;
1395 rtx next_tail;
1397 next_tail = NEXT_INSN (tail);
1398 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1400 if (! INSN_P (insn))
1401 continue;
1403 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
1404 add_forward_dependence (XEXP (link, 0), insn, REG_NOTE_KIND (link));
1408 /* Initialize the fields of DEPS for region data dependence
1409 analysis.  */
1411 void
1412 init_deps (deps)
1413 struct deps *deps;
1415 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
1417 deps->max_reg = max_reg;
1418 deps->reg_last = (struct deps_reg *)
1419 xcalloc (max_reg, sizeof (struct deps_reg));
1420 INIT_REG_SET (&deps->reg_last_in_use);
1422 deps->pending_read_insns = 0;
1423 deps->pending_read_mems = 0;
1424 deps->pending_write_insns = 0;
1425 deps->pending_write_mems = 0;
1426 deps->pending_lists_length = 0;
1427 deps->pending_flush_length = 0;
1428 deps->last_pending_memory_flush = 0;
1429 deps->last_function_call = 0;
1430 deps->sched_before_next_call = 0;
1431 deps->in_post_call_group_p = false;
1432 deps->libcall_block_tail_insn = 0;
1435 /* Free insn lists found in DEPS. */
1437 void
1438 free_deps (deps)
1439 struct deps *deps;
1441 int i;
1443 free_INSN_LIST_list (&deps->pending_read_insns);
1444 free_EXPR_LIST_list (&deps->pending_read_mems);
1445 free_INSN_LIST_list (&deps->pending_write_insns);
1446 free_EXPR_LIST_list (&deps->pending_write_mems);
1447 free_INSN_LIST_list (&deps->last_pending_memory_flush);
1449 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
1450 times. For a test case with 42000 regs and 8000 small basic blocks,
1451 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
1452 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
1454 struct deps_reg *reg_last = &deps->reg_last[i];
1455 if (reg_last->uses)
1456 free_INSN_LIST_list (&reg_last->uses);
1457 if (reg_last->sets)
1458 free_INSN_LIST_list (&reg_last->sets);
1459 if (reg_last->clobbers)
1460 free_INSN_LIST_list (&reg_last->clobbers);
1462 CLEAR_REG_SET (&deps->reg_last_in_use);
1464 free (deps->reg_last);
1467 /* If it is profitable to use them, initialize caches for tracking
1468 dependency information.  LUID is the number of insns to be scheduled;
1469 it is used in the estimate of profitability.  */
1471 void
1472 init_dependency_caches (luid)
1473 int luid;
1475 /* ?!? We could save some memory by computing a per-region luid mapping
1476 which could reduce both the number of vectors in the cache and the size
1477 of each vector. Instead we just avoid the cache entirely unless the
1478 average number of instructions in a basic block is very high. See
1479 the comment before the declaration of true_dependency_cache for
1480 what we consider "very high". */
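/* 100 * 5 is the "100X the average 5-insn basic block" threshold
   described before the declaration of true_dependency_cache.  */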
1481 if (luid / n_basic_blocks > 100 * 5)
1483 true_dependency_cache = sbitmap_vector_alloc (luid, luid);
1484 sbitmap_vector_zero (true_dependency_cache, luid);
1485 anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
1486 sbitmap_vector_zero (anti_dependency_cache, luid);
1487 output_dependency_cache = sbitmap_vector_alloc (luid, luid);
1488 sbitmap_vector_zero (output_dependency_cache, luid);
1489 #ifdef ENABLE_CHECKING
1490 forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
1491 sbitmap_vector_zero (forward_dependency_cache, luid);
1492 #endif
1496 /* Free the caches allocated in init_dependency_caches. */
1498 void
1499 free_dependency_caches ()
1501 if (true_dependency_cache)
1503 sbitmap_vector_free (true_dependency_cache);
1504 true_dependency_cache = NULL;
1505 sbitmap_vector_free (anti_dependency_cache);
1506 anti_dependency_cache = NULL;
1507 sbitmap_vector_free (output_dependency_cache);
1508 output_dependency_cache = NULL;
1509 #ifdef ENABLE_CHECKING
1510 sbitmap_vector_free (forward_dependency_cache);
1511 forward_dependency_cache = NULL;
1512 #endif
1516 /* Initialize some global variables needed by the dependency analysis
1517 code. */
1519 void
1520 init_deps_global ()
1522 reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
1523 reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
1524 reg_pending_uses = INITIALIZE_REG_SET (reg_pending_uses_head);
1525 reg_pending_barrier = NOT_A_BARRIER;
1528 /* Free everything used by the dependency analysis code. */
1530 void
1531 finish_deps_global ()
1533 FREE_REG_SET (reg_pending_sets);
1534 FREE_REG_SET (reg_pending_clobbers);
1535 FREE_REG_SET (reg_pending_uses);