/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
extern char *reg_known_equiv_p;
extern rtx *reg_known_value;

static regset_head reg_pending_sets_head;
static regset_head reg_pending_clobbers_head;
static regset_head reg_pending_uses_head;

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static bool reg_pending_barrier;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches in typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has an associated bitmap for each kind of dependency.  Each
   bitmap has enough entries to represent a dependency on any other insn
   in the insn chain.  If the bitmap for the true-dependency cache is
   allocated, the other two caches are allocated as well.  */
static sbitmap *true_dependency_cache;
static sbitmap *anti_dependency_cache;
static sbitmap *output_dependency_cache;
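
/* For example, a true dependence of insn J on insn I is recorded by
   setting bit INSN_LUID (I) in true_dependency_cache[INSN_LUID (J)],
   mirroring the LOG_LINKS entry that add_dependence creates for the
   pair.  */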
/* To speed up checking the consistency of formed forward insn
   dependencies we use the following cache.  An alternative would be
   to switch off the checking of duplicate insns in forward
   dependencies altogether.  */
#ifdef ENABLE_CHECKING
static sbitmap *forward_dependency_cache;
#endif

static int deps_may_trap_p PARAMS ((rtx));
static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
static void remove_dependence PARAMS ((rtx, rtx));
static void set_sched_group_p PARAMS ((rtx));

static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
static rtx group_leader PARAMS ((rtx));

static rtx get_condition PARAMS ((rtx));
static int conditions_mutex_p PARAMS ((rtx, rtx));
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (mem)
     rtx mem;
{
  rtx addr = XEXP (mem, 0);

  if (REG_P (addr)
      && REGNO (addr) >= FIRST_PSEUDO_REGISTER
      && reg_known_value[REGNO (addr)])
    addr = reg_known_value[REGNO (addr)];
  return rtx_addr_can_trap_p (addr);
}

/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
        return list;
      list = XEXP (list, 1);
    }
  return 0;
}
/* Find the condition under which INSN is executed.  */

static rtx
get_condition (insn)
     rtx insn;
{
  rtx pat = PATTERN (insn);
  rtx cond;

  if (pat == 0)
    return 0;
  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);
  if (GET_CODE (insn) != JUMP_INSN)
    return 0;
  /* A conditional jump has the shape
     (set (pc) (if_then_else COND (label_ref ...) (pc))), possibly with
     the label in the else arm instead.  */
  if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
    return 0;
  if (GET_CODE (SET_SRC (pat)) != IF_THEN_ELSE)
    return 0;
  pat = SET_SRC (pat);
  cond = XEXP (pat, 0);
  if (GET_CODE (XEXP (pat, 1)) == LABEL_REF
      && XEXP (pat, 2) == pc_rtx)
    return cond;
  else if (GET_CODE (XEXP (pat, 2)) == LABEL_REF
           && XEXP (pat, 1) == pc_rtx)
    return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)),
                           GET_MODE (cond), XEXP (cond, 0), XEXP (cond, 1));
  else
    return 0;
}
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */

static int
conditions_mutex_p (cond1, cond2)
     rtx cond1, cond2;
{
  if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
      && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
      && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
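
/* For example, (eq (reg 100) (const_int 0)) and (ne (reg 100) (const_int 0))
   are mutually exclusive: NE is the reverse of EQ and both operands match,
   so insns predicated on the two conditions can never execute together.  */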
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;
  int present_p;
  rtx cond1, cond2;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* We can get a dependency on deleted insns due to optimizations in
     the register allocation and reloading or due to splitting.  Any
     such dependency is useless and can be ignored.  */
  if (GET_CODE (elem) == NOTE)
    return;

  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  /* ??? add_dependence is the wrong place to be eliding dependencies,
     as that forgets that the condition expressions themselves may
     be dependent.  */
  if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
    {
      cond1 = get_condition (insn);
      cond2 = get_condition (elem);
      if (cond1 && cond2
          && conditions_mutex_p (cond1, cond2)
          /* Make sure first instruction doesn't affect condition of second
             instruction if switched.  */
          && !modified_in_p (cond1, elem)
          /* Make sure second instruction doesn't affect condition of first
             instruction if switched.  */
          && !modified_in_p (cond2, insn))
        return;
    }

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */
  next = next_nonnote_insn (elem);
  if (next && INSN_P (next) && SCHED_GROUP_P (next))
    {
      /* Despite an older comment to the contrary, notes can intervene
         here, which is why next_nonnote_insn is used above and below.
         We must reject CODE_LABELs, so that we don't get confused by one
         that has LABEL_PRESERVE_P set, which is represented by the same
         bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
         SCHED_GROUP_P.  */
      rtx nnext;
      while ((nnext = next_nonnote_insn (next)) != NULL
             && INSN_P (nnext)
             && SCHED_GROUP_P (nnext))
        next = nnext;

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
        return;

      /* Make the dependence to NEXT, the last insn of the group, instead
         of the original ELEM.  */
      elem = next;
    }

  present_p = 1;
#ifdef INSN_SCHEDULING
  /* ??? No good way to tell from here whether we're doing interblock
     scheduling.  Possibly add another callback.  */
#if 0
  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
     No need for interblock dependences with calls, since
     calls are not moved between blocks.  Note: the edge where
     elem is a CALL is still required.  */
  if (GET_CODE (insn) == CALL_INSN
      && (INSN_BB (elem) != INSN_BB (insn)))
    return;
#endif

  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
    {
      enum reg_note present_dep_type = 0;

      if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
        abort ();
      if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
        /* Do nothing (present_dep_type is already 0).  */
        ;
      else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem)))
        present_dep_type = REG_DEP_ANTI;
      else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem)))
        present_dep_type = REG_DEP_OUTPUT;
      else
        present_p = 0;
      if (present_p && (int) dep_type >= (int) present_dep_type)
        return;
    }
#endif

  /* Check that we don't already have this dependence.  */
  if (present_p)
    for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 0) == elem)
        {
#ifdef INSN_SCHEDULING
          /* Clear corresponding cache entry because type of the link
             may be changed.  */
          if (true_dependency_cache != NULL)
            {
              if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
                       && output_dependency_cache)
                RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else
                abort ();
            }
#endif

          /* If this is a more restrictive type of dependence than the
             existing one, then change the existing dependence to this
             type.  */
          if ((int) dep_type < (int) REG_NOTE_KIND (link))
            PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
          /* If we are adding a dependency to INSN's LOG_LINKs, then
             note that in the bitmap caches of dependency information.  */
          if (true_dependency_cache != NULL)
            {
              if ((int) REG_NOTE_KIND (link) == 0)
                SET_BIT (true_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
                SET_BIT (output_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
            }
#endif
          return;
        }
  /* Might want to check one level of transitivity to save conses.  */

  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
  LOG_LINKS (insn) = link;

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    {
      if ((int) dep_type == 0)
        SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_ANTI)
        SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_OUTPUT)
        SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
    }
#endif
}
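
/* Usage sketch (I1 and I2 are hypothetical insns in the same block):

     add_dependence (i2, i1, REG_DEP_ANTI);

   records in LOG_LINKS (i2) that I2 must not be issued before I1; the
   REG_DEP_ANTI kind marks the link as an anti (write-after-read)
   dependence.  A numerically smaller reg_note kind is treated as more
   restrictive, with 0 (a true data dependence) the most restrictive.  */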
/* A convenience wrapper to operate on an entire list.  */

static void
add_dependence_list (insn, list, dep_type)
     rtx insn, list;
     enum reg_note dep_type;
{
  for (; list; list = XEXP (list, 1))
    add_dependence (insn, XEXP (list, 0), dep_type);
}

/* Similar, but free *LISTP at the same time.  */

static void
add_dependence_list_and_free (insn, listp, dep_type)
     rtx insn;
     rtx *listp;
     enum reg_note dep_type;
{
  rtx list, next;
  for (list = *listp, *listp = NULL; list; list = next)
    {
      next = XEXP (list, 1);
      add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
}
/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */

static void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link, next;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link; link = next)
    {
      next = XEXP (link, 1);
      if (XEXP (link, 0) == elem)
        {
          if (prev)
            XEXP (prev, 1) = next;
          else
            LOG_LINKS (insn) = next;

#ifdef INSN_SCHEDULING
          /* If we are removing a dependency from the LOG_LINKS list,
             make sure to remove it from the cache too.  */
          if (true_dependency_cache != NULL)
            {
              if (REG_NOTE_KIND (link) == 0)
                RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
                RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
            }
#endif

          free_INSN_LIST_node (link);

          found = 1;
        }
      else
        prev = link;
    }

  if (!found)
    abort ();
  return;
}
/* Return the insn which represents the SCHED_GROUP containing INSN,
   i.e. the last insn in the group.  */

static rtx
group_leader (insn)
     rtx insn;
{
  rtx prev;

  do
    {
      prev = insn;
      insn = next_nonnote_insn (insn);
    }
  while (insn && INSN_P (insn) && SCHED_GROUP_P (insn));

  return prev;
}
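
/* For example, if SCHED_GROUP_P is set on I2 and I3 but not on I4,
   group_leader (I1) walks I1 -> I2 -> I3 and returns I3, the last
   member of the group.  */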
/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
   goes along with that.  */

static void
set_sched_group_p (insn)
     rtx insn;
{
  rtx link, prev;

  SCHED_GROUP_P (insn) = 1;

  /* There may be a note before this insn now, but all notes will
     be removed before we actually try to schedule the insns, so
     it won't cause a problem later.  We must avoid it here though.  */
  prev = prev_nonnote_insn (insn);

  /* Make a copy of all dependencies on the immediately previous insn,
     and add to this insn.  This is so that all the dependencies will
     apply to the group.  Remove an explicit dependence on this insn
     as SCHED_GROUP_P now represents it.  */

  if (find_insn_list (prev, LOG_LINKS (insn)))
    remove_dependence (insn, prev);

  for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
    add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
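
/* For example, given

     I1: [a] = r0     I2: r1 = [a]     I3: [a] = r2

   I2 has a true dependence on I1 (read after write), and I3 has an anti
   dependence on I2 (write after read) and an output dependence on I1
   (write after write).  */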
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

void
add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
     struct deps *deps;
     rtx *insn_list, *mem_list, insn, mem;
{
  rtx link;

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (current_sched_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
    }
  link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
  *mem_list = link;

  deps->pending_lists_length++;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (deps, insn, for_read, for_write)
     struct deps *deps;
     rtx insn;
     int for_read, for_write;
{
  if (for_write)
    {
      add_dependence_list_and_free (insn, &deps->pending_read_insns,
                                    REG_DEP_ANTI);
      free_EXPR_LIST_list (&deps->pending_read_mems);
    }

  add_dependence_list_and_free (insn, &deps->pending_write_insns,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  deps->pending_lists_length = 0;

  add_dependence_list_and_free (insn, &deps->last_pending_memory_flush,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
  deps->pending_flush_length = 1;
}
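
/* For example, sched_analyze below flushes here for every CALL_INSN with
   for_read == true and for_write == !CONST_OR_PURE_CALL_P (insn): an
   ordinary call thus becomes dependent on every pending read and write,
   while a const or pure call leaves the pending reads in place.  */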
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  int regno;
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (dest == 0)
    return;

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
          sched_analyze_1 (deps,
                           gen_rtx_CLOBBER (VOIDmode,
                                            XEXP (XVECEXP (dest, 0, i), 0)),
                           insn);

      if (GET_CODE (x) == SET)
        sched_analyze_2 (deps, SET_SRC (x), insn);
      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (deps, XEXP (dest, 1), insn);
          sched_analyze_2 (deps, XEXP (dest, 2), insn);
        }
      dest = XEXP (dest, 0);
    }

  if (GET_CODE (dest) == REG)
    {
      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
          if (code == SET)
            {
              while (--i >= 0)
                SET_REGNO_REG_SET (reg_pending_sets, regno + i);
            }
          else
            {
              while (--i >= 0)
                SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
            }
        }
      /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
         it does not reload.  Ignore these as they have served their
         purpose already.  */
      else if (regno >= deps->max_reg)
        {
          if (GET_CODE (PATTERN (insn)) != USE
              && GET_CODE (PATTERN (insn)) != CLOBBER)
            abort ();
        }
      else
        {
          if (code == SET)
            SET_REGNO_REG_SET (reg_pending_sets, regno);
          else
            SET_REGNO_REG_SET (reg_pending_clobbers, regno);

          /* Pseudos that are REG_EQUIV to something may be replaced
             by that during reloading.  We need only add dependencies for
             the address in the REG_EQUIV note.  */
          if (!reload_completed
              && reg_known_equiv_p[regno]
              && GET_CODE (reg_known_value[regno]) == MEM)
            sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

          /* Don't let it cross a call after scheduling if it doesn't
             already cross one.  */
          if (REG_N_CALLS_CROSSED (regno) == 0)
            add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI);
        }
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */
      rtx t = dest;

      if (current_sched_info->use_cselib)
        {
          t = shallow_copy_rtx (dest);
          cselib_lookup (XEXP (t, 0), Pmode, 1);
          XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
        }

      if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the default value of 32.  */
          flush_pending_lists (deps, insn, false, true);
        }
      else
        {
          rtx pending, pending_mem;

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (anti_dependence (XEXP (pending_mem, 0), t))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              if (output_dependence (XEXP (pending_mem, 0), t))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          add_dependence_list (insn, deps->last_pending_memory_flush,
                               REG_DEP_ANTI);

          add_insn_mem_dependence (deps, &deps->pending_write_insns,
                                   &deps->pending_write_mems, insn, dest);
        }
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
}
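
/* E.g. for a store "[r1] = r2", the code above adds anti dependencies on
   pending reads that may alias [r1], output dependencies on aliasing
   pending writes, and then queues this insn on pending_write_insns so
   that later memory references can test against it.  */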
/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      /* User of CC0 depends on immediately preceding insn.  */
      set_sched_group_p (insn);
      return;
#endif

    case REG:
      {
        int regno = REGNO (x);
        if (regno < FIRST_PSEUDO_REGISTER)
          {
            int i = HARD_REGNO_NREGS (regno, GET_MODE (x));
            while (--i >= 0)
              SET_REGNO_REG_SET (reg_pending_uses, regno + i);
          }
        /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
           it does not reload.  Ignore these as they have served their
           purpose already.  */
        else if (regno >= deps->max_reg)
          {
            if (GET_CODE (PATTERN (insn)) != USE
                && GET_CODE (PATTERN (insn)) != CLOBBER)
              abort ();
          }
        else
          {
            SET_REGNO_REG_SET (reg_pending_uses, regno);

            /* Pseudos that are REG_EQUIV to something may be replaced
               by that during reloading.  We need only add dependencies for
               the address in the REG_EQUIV note.  */
            if (!reload_completed
                && reg_known_equiv_p[regno]
                && GET_CODE (reg_known_value[regno]) == MEM)
              sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

            /* If the register does not already cross any calls, then add this
               insn to the sched_before_next_call list so that it will still
               not cross calls after scheduling.  */
            if (REG_N_CALLS_CROSSED (regno) == 0)
              deps->sched_before_next_call
                = alloc_INSN_LIST (insn, deps->sched_before_next_call);
          }
        return;
      }

    case MEM:
      {
        /* Reading memory.  */
        rtx u;
        rtx pending, pending_mem;
        rtx t = x;

        if (current_sched_info->use_cselib)
          {
            t = shallow_copy_rtx (t);
            cselib_lookup (XEXP (t, 0), Pmode, 1);
            XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
          }
        pending = deps->pending_read_insns;
        pending_mem = deps->pending_read_mems;
        while (pending)
          {
            if (read_dependence (XEXP (pending_mem, 0), t))
              add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        pending = deps->pending_write_insns;
        pending_mem = deps->pending_write_mems;
        while (pending)
          {
            if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
                                 t, rtx_varies_p))
              add_dependence (insn, XEXP (pending, 0), 0);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
          if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
              || deps_may_trap_p (x))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        add_insn_mem_dependence (deps, &deps->pending_read_insns,
                                 &deps->pending_read_mems, insn, x);

        /* Take advantage of tail recursion here.  */
        sched_analyze_2 (deps, XEXP (x, 0), insn);
        return;
      }

    /* Force pending stores to memory in case a trap handler needs them.  */
    case TRAP_IF:
      flush_pending_lists (deps, insn, true, false);
      break;

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
      {
        /* Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers, all pseudo-registers and all of
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu rounding
           mode.  An insn should not be moved across this even if it only uses
           pseudo-regs because it might give an incorrectly rounded result.  */
        if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
          reg_pending_barrier = true;

        /* For all ASM_OPERANDS, we must traverse the vector of input
           operands.  We can not just fall through here since then we
           would be confused by the ASM_INPUT rtx inside ASM_OPERANDS,
           which does not indicate a traditional asm as it would in its
           normal usage.  */

        if (code == ASM_OPERANDS)
          {
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
              sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
            return;
          }
        break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }
}
/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (deps, x, insn, loop_notes)
     struct deps *deps;
     rtx x, insn;
     rtx loop_notes;
{
  RTX_CODE code = GET_CODE (x);
  rtx link;
  int i;

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
         false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    {
      sched_analyze_1 (deps, x, insn);

      /* Bare clobber insns are used for letting life analysis, reg-stack
         and others know that a value is dead.  Depend on the last call
         instruction so that reg-stack won't get confused.  */
      if (code == CLOBBER)
        add_dependence_list (insn, deps->last_function_call, REG_DEP_OUTPUT);
    }
  else if (code == PARALLEL)
    {
      int i;
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        {
          rtx sub = XVECEXP (x, 0, i);
          code = GET_CODE (sub);

          if (code == COND_EXEC)
            {
              sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
              sub = COND_EXEC_CODE (sub);
              code = GET_CODE (sub);
            }
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (deps, sub, insn);
          else
            sched_analyze_2 (deps, sub, insn);
        }
    }
  else
    sched_analyze_2 (deps, x, insn);

  /* Mark registers CLOBBERED or used by called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    {
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        {
          if (GET_CODE (XEXP (link, 0)) == CLOBBER)
            sched_analyze_1 (deps, XEXP (link, 0), insn);
          else
            sched_analyze_2 (deps, XEXP (link, 0), insn);
        }
      if (find_reg_note (insn, REG_SETJMP, NULL))
        reg_pending_barrier = true;
    }

  if (GET_CODE (insn) == JUMP_INSN)
    {
      rtx next;
      next = next_nonnote_insn (insn);
      if (next && GET_CODE (next) == BARRIER)
        reg_pending_barrier = true;
      else
        {
          rtx pending, pending_mem;
          regset_head tmp;
          INIT_REG_SET (&tmp);

          (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
          IOR_REG_SET (reg_pending_uses, &tmp);
          CLEAR_REG_SET (&tmp);

          /* All memory writes and volatile reads must happen before the
             jump.  Non-volatile reads must happen before the jump iff
             the result is needed by the above register used mask.  */

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          add_dependence_list (insn, deps->last_pending_memory_flush,
                               REG_DEP_ANTI);
        }
    }

  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */
  if (loop_notes)
    {
      rtx link;

      /* Update loop_notes with any notes from this insn.  Also determine
         if any of the notes on the list correspond to instruction scheduling
         barriers (loop, eh & setjmp notes, but not range notes).  */
      link = loop_notes;
      while (XEXP (link, 1))
        {
          if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
            reg_pending_barrier = true;

          link = XEXP (link, 1);
        }
      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;
    }

  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  */
  if (can_throw_internal (insn))
    reg_pending_barrier = true;

  /* Add dependencies if a scheduling barrier was found.  */
  if (reg_pending_barrier)
    {
      if (GET_CODE (PATTERN (insn)) == COND_EXEC)
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
              add_dependence_list (insn, reg_last->sets, 0);
              add_dependence_list (insn, reg_last->clobbers, 0);
            });
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (insn, &reg_last->uses,
                                            REG_DEP_ANTI);
              add_dependence_list_and_free (insn, &reg_last->sets, 0);
              add_dependence_list_and_free (insn, &reg_last->clobbers, 0);
              reg_last->uses_length = 0;
              reg_last->clobbers_length = 0;
            });
        }

      for (i = 0; i < deps->max_reg; i++)
        {
          struct deps_reg *reg_last = &deps->reg_last[i];
          reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
        }

      flush_pending_lists (deps, insn, true, true);
      reg_pending_barrier = false;
    }
  else
    {
      /* If the current insn is conditional, we can't free any
         of the lists.  */
      if (GET_CODE (PATTERN (insn)) == COND_EXEC)
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0);
              add_dependence_list (insn, reg_last->clobbers, 0);
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
              reg_last->uses_length++;
            });
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
              reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
              reg_last->clobbers_length++;
            });
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->clobbers, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
              reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
            });
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0);
              add_dependence_list (insn, reg_last->clobbers, 0);
              reg_last->uses_length++;
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
            });
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
                  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
                {
                  add_dependence_list_and_free (insn, &reg_last->sets,
                                                REG_DEP_OUTPUT);
                  add_dependence_list_and_free (insn, &reg_last->uses,
                                                REG_DEP_ANTI);
                  add_dependence_list_and_free (insn, &reg_last->clobbers,
                                                REG_DEP_OUTPUT);
                  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
                  reg_last->clobbers_length = 0;
                  reg_last->uses_length = 0;
                }
              else
                {
                  add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
                  add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
                }
              reg_last->clobbers_length++;
              reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
            });
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (insn, &reg_last->sets,
                                            REG_DEP_OUTPUT);
              add_dependence_list_and_free (insn, &reg_last->clobbers,
                                            REG_DEP_OUTPUT);
              add_dependence_list_and_free (insn, &reg_last->uses,
                                            REG_DEP_ANTI);
              reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
              reg_last->uses_length = 0;
              reg_last->clobbers_length = 0;
            });
        }

      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
    }

  CLEAR_REG_SET (reg_pending_uses);
  CLEAR_REG_SET (reg_pending_clobbers);
  CLEAR_REG_SET (reg_pending_sets);

  /* If we are currently in a libcall scheduling group, then mark the
     current insn as being in a scheduling group and that it can not
     be moved into a different basic block.  */

  if (deps->libcall_block_tail_insn)
    {
      set_sched_group_p (insn);
      CANT_MOVE (insn) = 1;
    }

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice-versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
        goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
        dest_regno = REGNO (tmp);
      else
        goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
        src_regno = REGNO (tmp);
      else
        goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
          || dest_regno < FIRST_PSEUDO_REGISTER)
        {
          set_sched_group_p (insn);
          CANT_MOVE (insn) = 1;
        }
      else
        {
        end_call_group:
          deps->in_post_call_group_p = false;
        }
    }
}
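
/* For instance, a move such as (set (reg:SI 123) (reg:SI 0)) appearing
   right after a CALL_INSN involves a hard register, so the code above
   glues it to the call via set_sched_group_p and marks it CANT_MOVE.  */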
/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

void
sched_analyze (deps, head, tail)
     struct deps *deps;
     rtx head, tail;
{
  rtx insn;
  rtx loop_notes = 0;

  if (current_sched_info->use_cselib)
    cselib_init ();

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      rtx link, end_seq, r0, set;

      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
        {
          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          /* Make each JUMP_INSN a scheduling barrier for memory
             references.  */
          if (GET_CODE (insn) == JUMP_INSN)
            {
              /* Keep the list a reasonable size.  */
              if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
                flush_pending_lists (deps, insn, true, true);
              else
                deps->last_pending_memory_flush
                  = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
            }
          sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
          loop_notes = 0;
        }
      else if (GET_CODE (insn) == CALL_INSN)
        {
          int i;

          CANT_MOVE (insn) = 1;

          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          if (find_reg_note (insn, REG_SETJMP, NULL))
            {
              /* This is setjmp.  Assume that all registers, not just
                 hard registers, may be clobbered by this call.  */
              reg_pending_barrier = true;
            }
          else
            {
              for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
                /* A call may read and modify global register variables.  */
                if (global_regs[i])
                  {
                    SET_REGNO_REG_SET (reg_pending_sets, i);
                    SET_REGNO_REG_SET (reg_pending_uses, i);
                  }
                /* Other call-clobbered hard regs may be clobbered.  */
                else if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
                  SET_REGNO_REG_SET (reg_pending_clobbers, i);
                /* We don't know what set of fixed registers might be used
                   by the function, but it is certain that the stack pointer
                   is among them, so be conservative.  */
                else if (fixed_regs[i])
                  SET_REGNO_REG_SET (reg_pending_uses, i);
                /* The frame pointer is normally not used by the function
                   itself, but by the debugger.  */
                /* ??? MIPS o32 is an exception.  It uses the frame pointer
                   in the macro expansion of jal but does not represent this
                   fact in the call_insn rtl.  */
                else if (i == FRAME_POINTER_REGNUM
                         || (i == HARD_FRAME_POINTER_REGNUM
                             && (! reload_completed || frame_pointer_needed)))
                  SET_REGNO_REG_SET (reg_pending_uses, i);
            }

          /* For each insn which shouldn't cross a call, add a dependence
             between that insn and this call insn.  */
          add_dependence_list_and_free (insn, &deps->sched_before_next_call,
                                        REG_DEP_ANTI);

          sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
          loop_notes = 0;

          /* In the absence of interprocedural alias analysis, we must flush
             all pending reads and writes, and start new dependencies starting
             from here.  But only flush writes for constant calls (which may
             be passed a pointer to something we haven't written yet).  */
          flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));

          /* Remember the last function call for limiting lifetimes.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = true;
        }

      /* See comments on reemit_notes as to why we do this.
         ??? Actually, the reemit_notes just say what is done, not why.  */

      if (GET_CODE (insn) == NOTE
          && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
        {
          rtx rtx_region;

          if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
            rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
          else
            rtx_region = GEN_INT (0);

          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        rtx_region,
                                        loop_notes);
          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        GEN_INT (NOTE_LINE_NUMBER (insn)),
                                        loop_notes);
          CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
        }

      if (current_sched_info->use_cselib)
        cselib_process_insn (insn);

      /* Now that we have completed handling INSN, check and see if it is
         a CLOBBER beginning a libcall block.  If it is, record the
         end of the libcall sequence.

         We want to schedule libcall blocks as a unit before reload.  While
         this restricts scheduling, it preserves the meaning of a libcall
         block.

         As a side effect, we may get better code due to decreased register
         pressure as well as less chance of a foreign insn appearing in
         a libcall block.  */
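      /* The shape being matched is, schematically:

           (clobber (reg R))           ; REG_LIBCALL note -> last insn
           ... insns computing the libcall result into R ...
           (set (reg R) (reg R))       ; REG_RETVAL and REG_EQUAL notes

         Everything from the CLOBBER to the final no-op copy is kept
         together as one scheduling group.  */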
      if (!reload_completed
          /* Note we may have nested libcall sequences.  We only care about
             the outermost libcall sequence.  */
          && deps->libcall_block_tail_insn == 0
          /* The sequence must start with a clobber of a register.  */
          && GET_CODE (insn) == INSN
          && GET_CODE (PATTERN (insn)) == CLOBBER
          && (r0 = XEXP (PATTERN (insn), 0), GET_CODE (r0) == REG)
          /* The CLOBBER must also have a REG_LIBCALL note attached.  */
          && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
          && (end_seq = XEXP (link, 0)) != 0
          /* The insn referenced by the REG_LIBCALL note must be a
             simple nop copy with the same destination as the register
             mentioned in the clobber.  */
          && (set = single_set (end_seq)) != 0
          && SET_DEST (set) == r0 && SET_SRC (set) == r0
          /* And finally the insn referenced by the REG_LIBCALL must
             also contain a REG_EQUAL note and a REG_RETVAL note.  */
          && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
          && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
        deps->libcall_block_tail_insn = XEXP (link, 0);

      /* If we have reached the end of a libcall block, then close the
         block.  */
      if (deps->libcall_block_tail_insn == insn)
        deps->libcall_block_tail_insn = 0;

      if (insn == tail)
        {
          if (current_sched_info->use_cselib)
            cselib_finish ();
          return;
        }
    }
  abort ();
}
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */

void
compute_forward_dependences (head, tail)
     rtx head, tail;
{
  rtx insn, link;
  rtx next_tail;
  enum reg_note dep_type;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
        continue;

      insn = group_leader (insn);

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
        {
          rtx x = group_leader (XEXP (link, 0));
          rtx new_link;

          if (x != XEXP (link, 0))
            continue;

#ifdef ENABLE_CHECKING
          /* If add_dependence is working properly there should never
             be notes, deleted insns or duplicates in the backward
             links.  Thus we need not check for them here.

             However, if we have enabled checking we might as well go
             ahead and verify that add_dependence worked properly.  */
          if (GET_CODE (x) == NOTE
              || INSN_DELETED_P (x)
              || (forward_dependency_cache != NULL
                  && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
                               INSN_LUID (insn)))
              || (forward_dependency_cache == NULL
                  && find_insn_list (insn, INSN_DEPEND (x))))
            abort ();
          if (forward_dependency_cache != NULL)
            SET_BIT (forward_dependency_cache[INSN_LUID (x)],
                     INSN_LUID (insn));
#endif

          new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));

          dep_type = REG_NOTE_KIND (link);
          PUT_REG_NOTE_KIND (new_link, dep_type);

          INSN_DEPEND (x) = new_link;
          INSN_DEP_COUNT (insn) += 1;
        }
    }
}
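
/* After this pass, each backward LOG_LINKS edge from INSN to a
   predecessor X is mirrored by a forward INSN_DEPEND edge from X to
   INSN, and INSN_DEP_COUNT (insn) holds the number of predecessors
   that must be issued before INSN becomes ready.  */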
/* Initialize variables for region data dependence analysis in DEPS.  */

void
init_deps (deps)
     struct deps *deps;
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  deps->reg_last = (struct deps_reg *)
    xcalloc (max_reg, sizeof (struct deps_reg));
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_lists_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->sched_before_next_call = 0;
  deps->in_post_call_group_p = false;
  deps->libcall_block_tail_insn = 0;
}
/* Free insn lists found in DEPS.  */

void
free_deps (deps)
     struct deps *deps;
{
  int i;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a test case with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
        free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
        free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->clobbers)
        free_INSN_LIST_list (&reg_last->clobbers);
    });
  CLEAR_REG_SET (&deps->reg_last_in_use);

  free (deps->reg_last);
}
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled;
   it is used in the estimate of profitability.  */

void
init_dependency_caches (luid)
     int luid;
{
  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
  if (luid / n_basic_blocks > 100 * 5)
    {
      true_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (true_dependency_cache, luid);
      anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (anti_dependency_cache, luid);
      output_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (output_dependency_cache, luid);
#ifdef ENABLE_CHECKING
      forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (forward_dependency_cache, luid);
#endif
    }
}
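
/* Concretely: with the 5-insns-per-block estimate discussed above, the
   caches are only allocated when a region averages more than 500 insns
   per basic block, e.g. luid == 60000 with n_basic_blocks == 100.  */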
/* Free the caches allocated in init_dependency_caches.  */

void
free_dependency_caches ()
{
  if (true_dependency_cache)
    {
      sbitmap_vector_free (true_dependency_cache);
      true_dependency_cache = NULL;
      sbitmap_vector_free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      sbitmap_vector_free (output_dependency_cache);
      output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      sbitmap_vector_free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
    }
}
/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global ()
{
  reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
  reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
  reg_pending_uses = INITIALIZE_REG_SET (reg_pending_uses_head);
  reg_pending_barrier = false;
}

/* Free everything used by the dependency analysis code.  */

void
finish_deps_global ()
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
}