1 /* Allocation for dataflow support routines.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008 Free Software Foundation, Inc.
4 Originally contributed by Michael P. Hayes
5 (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
6 Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
7 and Kenneth Zadeck (zadeck@naturalbridge.com).
9 This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
The files in this collection (df*.c, df.h) provide a general framework
for solving dataflow problems.  The global dataflow solutions are
computed using a worklist-based implementation of iterative dataflow
analysis.
The file df-problems.c provides problem instances for the most common
dataflow problems: reaching defs, upward exposed uses, live variables,
uninitialized variables, def-use chains, and use-def chains.  However,
the interface allows other dataflow problems to be defined as well.
37 Dataflow analysis is available in most of the rtl backend (the parts
38 between pass_df_initialize and pass_df_finish). It is quite likely
39 that these boundaries will be expanded in the future. The only
40 requirement is that there be a correct control flow graph.
42 There are three variations of the live variable problem that are
43 available whenever dataflow is available. The LR problem finds the
areas that can reach a use of a variable, the UR problem finds the
45 areas that can be reached from a definition of a variable. The LIVE
46 problem finds the intersection of these two areas.
48 There are several optional problems. These can be enabled when they
49 are needed and disabled when they are not needed.
Dataflow problems are generally solved in three layers.  The bottom
layer is called scanning, where a data structure is built for each rtl
insn that describes the set of defs and uses of that insn.  Scanning
is generally kept up to date, i.e. as the insns change, the scanned
version of an insn changes also.  There are various mechanisms for
making this happen; they are described in the INCREMENTAL SCANNING
section below.

In the middle layer, basic blocks are scanned to produce transfer
functions which describe the effects of that block on the global
dataflow solution.  The transfer functions are only rebuilt if some
instruction within the block has changed.
The top layer is the dataflow solution itself.  The dataflow solution
is computed by using an efficient iterative solver and the transfer
functions.  The dataflow solution must be recomputed whenever the
control flow changes or one of the transfer functions changes.
Here is an example of using the dataflow routines.

      df_[chain,live,note,rd]_add_problem (flags);

      df_set_blocks (blocks);

      df_analyze ();

      df_dump (stderr);

      df_finish_pass (false);
DF_[chain,live,note,rd]_ADD_PROBLEM adds a problem, defined by an
instance of struct df_problem, to the set of problems solved in this
instance of df.  All calls to add a problem for a given instance of df
must occur before the first call to DF_ANALYZE.
Problems can be dependent on other problems.  For instance, solving
def-use or use-def chains is dependent on solving reaching
definitions.  As long as these dependencies are listed in the problem
definition, the order of adding the problems is not material;
otherwise, the problems will be solved in the order of the calls to
df_add_problem.  Note that it is not necessary to have a problem.  In
that case, df will just be used to do the scanning.
DF_SET_BLOCKS is an optional call used to define a region of the
function on which the analysis will be performed.  The normal case is
to analyze the entire function and no call to df_set_blocks is made.
DF_SET_BLOCKS only affects which blocks are considered when computing
the transfer functions and the final solution.  The insn level
information is always kept up to date.
106 When a subset is given, the analysis behaves as if the function only
107 contains those blocks and any edges that occur directly between the
108 blocks in the set. Care should be taken to call df_set_blocks right
109 before the call to analyze in order to eliminate the possibility that
110 optimizations that reorder blocks invalidate the bitvector.
DF_ANALYZE causes all of the defined problems to be (re)solved.  When
DF_ANALYZE completes, the IN and OUT sets for each basic block contain
the computed information.  The DF_*_BB_INFO macros can be used to
access these bitvectors.  All deferred rescans are done before the
transfer functions are recomputed.
DF_DUMP can then be called to dump the information produced to some
file.  This calls DF_DUMP_START, to print the information that is not
basic block specific, and then calls DF_DUMP_TOP and DF_DUMP_BOTTOM
for each block to print the basic block specific information.  These
parts can all be called separately as part of a larger dump function.
DF_FINISH_PASS causes df_remove_problem to be called on all of the
optional problems.  It also causes any insns whose scanning has been
deferred to be rescanned, and clears all of the changeable flags.
Setting the pass manager TODO_df_finish flag causes this function to
be run.  However, the pass manager will call df_finish_pass AFTER the
pass dumping has been done, so if you want to see the results of the
optional problems in the pass dumps, use the TODO flag rather than
calling the function yourself.
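As a concrete illustration, a pass that wants fully maintained def-use
chains plus the always-present LR (live registers) solution might look
like the sketch below.  Only the entry points documented above are
assumed; the DF_LR_IN access and the pass body are illustrative, so
check df.h for the exact macros and flags before relying on them.

      df_chain_add_problem (DF_DU_CHAIN);
      df_analyze ();

      FOR_EACH_BB (bb)
        {
          bitmap live_in = DF_LR_IN (bb);
          ... examine or transform the insns in bb using the chains
              and LIVE_IN ...
        }

      if (dump_file)
        df_dump (dump_file);

      df_finish_pass (false);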
There are four ways of doing the incremental scanning:

1) Immediate rescanning - Calls to df_insn_rescan, df_notes_rescan,
   df_bb_delete, and df_insn_change_bb have been added to most of
   the low level service functions that maintain the cfg and change
   rtl.  Calling any of these routines may cause some number of insns
   to be rescanned.
144 For most modern rtl passes, this is certainly the easiest way to
145 manage rescanning the insns. This technique also has the advantage
146 that the scanning information is always correct and can be relied
147 upon even after changes have been made to the instructions. This
technique is contraindicated in several cases:
a) If def-use chains OR use-def chains (but not both) are built,
   using this is SIMPLY WRONG.  The problem is that when a ref is
   deleted that is the target of an edge, there is not enough
   information to efficiently find the source of the edge and
   delete the edge.  This leaves a dangling reference that may
   cause trouble later.
157 b) If def-use chains AND use-def chains are built, this may
158 produce unexpected results. The problem is that the incremental
159 scanning of an insn does not know how to repair the chains that
160 point into an insn when the insn changes. So the incremental
161 scanning just deletes the chains that enter and exit the insn
162 being changed. The dangling reference issue in (a) is not a
163 problem here, but if the pass is depending on the chains being
164 maintained after insns have been modified, this technique will
165 not do the correct thing.
c) If the pass modifies insns several times, this incremental
   updating may be expensive.

d) If the pass modifies all of the insns, as does register
   allocation, it is simply better to rescan the entire function.

e) If the pass uses either non-standard or ancient techniques to
   modify insns, automatic detection of the insns that need to be
   rescanned may be impractical.  Cse and regrename fall into this
   category.
2) Deferred rescanning - Calls to df_insn_rescan, df_notes_rescan, and
   df_insn_delete do not immediately change the insn but instead make
   a note that the insn needs to be rescanned.  The next call to
   df_analyze, df_finish_pass, or df_process_deferred_rescans will
   cause all of the pending rescans to be processed.  (A sketch of
   this mode appears after this list.)

   This is the technique of choice if either 1a, 1b, or 1c are issues
   in the pass.  In the case of 1a or 1b, a call to df_remove_problem
   (df_chain) should be made before the next call to df_analyze or
   df_process_deferred_rescans.

   To enable this mode, call df_set_flags (DF_DEFER_INSN_RESCAN).
   (This mode can be cleared by calling df_clear_flags
   (DF_DEFER_INSN_RESCAN) but this does not cause the deferred insns
   to be rescanned.)
3) Total rescanning - In this mode the rescanning is disabled.
   However, the df information associated with a deleted insn is
   deleted at the time the insn is deleted.  At the end of the pass, a
   call must be made to df_insn_rescan_all.  This method is used by
   the register allocator since it generally changes each insn
   multiple times (once for each ref) and does not need to make use of
   the updated scanning information.

   It is also currently used by two older passes (cse, and regrename)
   which change insns in hard to track ways.  It is hoped that this
   will be fixed soon since it is expensive to rescan all of the
   insns when only a small number of them have really changed.
207 4) Do it yourself - In this mechanism, the pass updates the insns
208 itself using the low level df primitives. Currently no pass does
209 this, but it has the advantage that it is quite efficient given
210 that the pass generally has exact knowledge of what it is changing.
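As a sketch of the deferred mode (2) above: the pass queues rescans
while it works and drains the queue once at the end.  The routine
my_pass_transform_insn stands in for whatever transformation the pass
performs; bb and insn are assumed to be declared by the pass.

      df_set_flags (DF_DEFER_INSN_RESCAN);

      FOR_EACH_BB (bb)
        FOR_BB_INSNS (bb, insn)
          if (INSN_P (insn) && my_pass_transform_insn (insn))
            df_insn_rescan (insn);

      df_process_deferred_rescans ();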
Scanning produces a `struct df_ref' data structure (ref) that is
allocated for every register reference (def or use); it records the
insn and bb the ref is found within.  The refs are linked together in
chains of uses and defs for each insn and for each register.  Each ref
also has a chain field that links all the use refs for a def or all
the def refs for a use.  This is used to create use-def or def-use
chains.
Different optimizations have different needs.  Ultimately, only
register allocation and schedulers should be using the bitmaps
produced for the live register and uninitialized register problems.
The rest of the backend should be upgraded to using and maintaining
the linked information such as def-use or use-def chains.
231 While incremental bitmaps are not worthwhile to maintain, incremental
232 chains may be perfectly reasonable. The fastest way to build chains
233 from scratch or after significant modifications is to build reaching
234 definitions (RD) and build the chains from this.
However, general algorithms for maintaining use-def or def-use chains
are not practical.  The amount of work to recompute any chain after an
arbitrary change is large.  However, with a modest amount of work it
is generally possible to have the application that uses the chains
keep them up to date.  The high level knowledge of what is really
happening is essential to crafting efficient incremental algorithms.
As for the bit vector problems, there is no interface to give a set of
blocks over which to resolve the iteration.  In general, restarting a
dataflow iteration is difficult and expensive.  Again, the best way to
keep the dataflow information up to date (if this is really what is
needed) is to formulate a problem specific solution.
250 There are fine grained calls for creating and deleting references from
251 instructions in df-scan.c. However, these are not currently connected
252 to the engine that resolves the dataflow equations.
257 The basic object is a DF_REF (reference) and this may either be a
258 DEF (definition) or a USE of a register.
These are linked into a variety of lists; namely reg-def, reg-use,
insn-def, insn-use, def-use, and use-def lists.  For example, the
reg-def lists contain all the locations that define a given register
while the insn-use lists contain all the use locations within a given
insn.

Note that the reg-def and reg-use chains are generally short for
pseudos and long for the hard registers.
271 1) The df insn information is kept in an array of DF_INSN_INFO objects.
272 The array is indexed by insn uid, and every DF_REF points to the
273 DF_INSN_INFO object of the insn that contains the reference.
275 2) Each insn has three sets of refs, which are linked into one of three
276 lists: The insn's defs list (accessed by the DF_INSN_INFO_DEFS,
277 DF_INSN_DEFS, or DF_INSN_UID_DEFS macros), the insn's uses list
278 (accessed by the DF_INSN_INFO_USES, DF_INSN_USES, or
279 DF_INSN_UID_USES macros) or the insn's eq_uses list (accessed by the
280 DF_INSN_INFO_EQ_USES, DF_INSN_EQ_USES or DF_INSN_UID_EQ_USES macros).
The latter list is the list of references in REG_EQUAL or REG_EQUIV
notes.  These macros produce a ref (or NULL); the rest of the list can
be obtained by traversal of the NEXT_REF field (accessed by the
DF_REF_NEXT_REF macro).  There is no significance to the ordering of
the uses or refs in an instruction.  (A sketch of accessing an insn's
refs appears after this numbered list.)
287 3) Each insn has a logical uid field (LUID) which is stored in the
288 DF_INSN_INFO object for the insn. The LUID field is accessed by
289 the DF_INSN_INFO_LUID, DF_INSN_LUID, and DF_INSN_UID_LUID macros.
290 When properly set, the LUID is an integer that numbers each insn in
291 the basic block, in order from the start of the block.
The numbers are only correct after a call to df_analyze.  They will
rot after insns are added, deleted or moved around.
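As a sketch of (2): the loops later in this file (e.g. df_find_def)
access an insn's defs as a NULL terminated vector of ref pointers, and
that is the form used here.  Counting how many defs of REGNO appear in
INSN can then be written as below; regno and insn are assumed to be in
scope.

      struct df_ref **def_rec;
      unsigned int n_defs = 0;

      for (def_rec = DF_INSN_UID_DEFS (INSN_UID (insn)); *def_rec; def_rec++)
        if (DF_REF_REGNO (*def_rec) == regno)
          n_defs++;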
297 There are 4 ways to obtain access to refs:
299 1) References are divided into two categories, REAL and ARTIFICIAL.
301 REAL refs are associated with instructions.
303 ARTIFICIAL refs are associated with basic blocks. The heads of
304 these lists can be accessed by calling df_get_artificial_defs or
305 df_get_artificial_uses for the particular basic block.
307 Artificial defs and uses occur both at the beginning and ends of blocks.
For blocks that are at the destination of eh edges, the
artificial uses and defs occur at the beginning.  The defs relate
to the registers specified in EH_RETURN_DATA_REGNO and the uses
relate to the registers specified in EH_USES.  Logically these
defs and uses should really occur along the eh edge, but there is
no convenient way to do this.  Artificial refs that occur at the
beginning of the block have the DF_REF_AT_TOP flag set.
Artificial uses occur at the end of all blocks.  These arise from
the hard registers that are always live, such as the stack
register, and are put there to keep the code from forgetting about
them.

Artificial defs occur at the end of the entry block.  These arise
from registers that are live at entry to the function.
2) There are three types of refs: defs, uses and eq_uses.  (Eq_uses are
   uses that appear inside a REG_EQUAL or REG_EQUIV note.)

   All of the eq_uses, uses and defs associated with each pseudo or
   hard register may be linked in a bidirectional chain.  These are
   called reg-use or reg-def chains (a sketch of walking one of these
   chains appears after this list).  If the changeable flag
   DF_EQ_NOTES is set when the chains are built, the eq_uses will be
   treated like uses.  If it is not set they are ignored.
334 The first use, eq_use or def for a register can be obtained using
335 the DF_REG_USE_CHAIN, DF_REG_EQ_USE_CHAIN or DF_REG_DEF_CHAIN
336 macros. Subsequent uses for the same regno can be obtained by
337 following the next_reg field of the ref. The number of elements in
338 each of the chains can be found by using the DF_REG_USE_COUNT,
339 DF_REG_EQ_USE_COUNT or DF_REG_DEF_COUNT macros.
341 In previous versions of this code, these chains were ordered. It
342 has not been practical to continue this practice.
3) If def-use or use-def chains are built, these can be traversed to
   get to other refs.  If the flag DF_EQ_NOTES has been set, the chains
   include the eq_uses.  Otherwise these are ignored when building the
   chains.
4) An array of all of the uses (and an array of all of the defs) can
   be built.  These arrays are indexed by the value in the id field of
   the ref.  These arrays are only lazily kept up to date, and that
   process can be expensive.  To have these arrays built, call
   df_reorganize_defs or df_reorganize_uses.  If the flag DF_EQ_NOTES
   has been set the array will contain the eq_uses.  Otherwise these
   are ignored when building the array and assigning the ids.  Note
   that the values in the id field of a ref may change across calls to
   df_analyze or df_reorganize_defs or df_reorganize_uses.

   If the only use of this array is to find all of the refs, it is
   better to traverse all of the registers and then traverse all of
   the reg-use or reg-def chains.
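As a sketch of (2) above, here is a walk over the reg-def chain of
REGNO that counts its elements; the count should agree with
DF_REG_DEF_COUNT (regno).  The next_reg field is followed directly,
just as df_regs_chain_dump later in this file does; regno is assumed
to be in scope.

      struct df_ref *def;
      unsigned int n_defs = 0;

      for (def = DF_REG_DEF_CHAIN (regno); def; def = def->next_reg)
        n_defs++;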
365 Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
366 both a use and a def. These are both marked read/write to show that they
367 are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
368 will generate a use of reg 42 followed by a def of reg 42 (both marked
369 read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
370 generates a use of reg 41 then a def of reg 41 (both marked read/write),
371 even though reg 41 is decremented before it is used for the memory
372 address in this second example.
A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
for which the number of word_mode units covered by the outer mode is
smaller than that covered by the inner mode, invokes a read-modify-write
operation.  We generate both a use and a def and again mark them
read/write.

Paradoxical subreg writes do not leave a trace of the old content, so they
are write-only operations.
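A pass can recognize such read-modify-write defs by testing the ref
flags.  A minimal sketch, assuming the DF_REF_READ_WRITE flag from
df.h is the one set on the refs described above (insn is assumed to
be in scope):

      struct df_ref **def_rec;

      for (def_rec = DF_INSN_UID_DEFS (INSN_UID (insn)); *def_rec; def_rec++)
        if (DF_REF_FLAGS (*def_rec) & DF_REF_READ_WRITE)
          ... this def also reads the old value of its register ...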
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "rtl.h"
#include "insn-config.h"
#include "function.h"
#include "alloc-pool.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "bitmap.h"
#include "timevar.h"
#include "df.h"
#include "tree-pass.h"
static void *df_get_bb_info (struct dataflow *, unsigned int);
static void df_set_bb_info (struct dataflow *, unsigned int, void *);
static void df_set_clean_cfg (void);

/* An obstack for bitmaps not related to specific dataflow problems.
   This obstack should e.g. be used for bitmaps with a short life time
   such as temporary bitmaps.  */

bitmap_obstack df_bitmap_obstack;
420 /*----------------------------------------------------------------------------
421 Functions to create, destroy and manipulate an instance of df.
422 ----------------------------------------------------------------------------*/
/* Add PROBLEM (and any dependent problems) to the DF instance.  */

void
df_add_problem (struct df_problem *problem)
{
  struct dataflow *dflow;
  int i;

  /* First try to add the dependent problem.  */
  if (problem->dependent_problem)
    df_add_problem (problem->dependent_problem);

  /* Check to see if this problem has already been defined.  If it
     has, just return that instance, if not, add it to the end of the
     vector.  */
  dflow = df->problems_by_index[problem->id];
  if (dflow)
    return;

  /* Make a new one and add it to the end.  */
  dflow = XCNEW (struct dataflow);
  dflow->problem = problem;
  dflow->computed = false;
  dflow->solutions_dirty = true;
  df->problems_by_index[dflow->problem->id] = dflow;

  /* Keep the defined problems ordered by index.  This solves the
     problem that RI will use the information from UREC if UREC has
     been defined, or from LIVE if LIVE is defined and otherwise LR.
     However for this to work, the computation of RI must be pushed
     after whichever of those problems is defined, but we do not
     require any of those except for LR to have actually been
     defined.  */
  df->num_problems_defined++;
  for (i = df->num_problems_defined - 2; i >= 0; i--)
    {
      if (problem->id < df->problems_in_order[i]->problem->id)
        df->problems_in_order[i+1] = df->problems_in_order[i];
      else
        {
          df->problems_in_order[i+1] = dflow;
          return;
        }
    }
  df->problems_in_order[0] = dflow;
}
/* Set the CHANGEABLE_FLAGS bits in the df instance.  The old flags are
   returned.  If a flag is not allowed to be changed this will fail if
   checking is enabled.  */

enum df_changeable_flags
df_set_flags (enum df_changeable_flags changeable_flags)
{
  enum df_changeable_flags old_flags = df->changeable_flags;
  df->changeable_flags |= changeable_flags;
  return old_flags;
}
/* Clear the CHANGEABLE_FLAGS bits in the df instance.  The old flags are
   returned.  If a flag is not allowed to be changed this will fail if
   checking is enabled.  */

enum df_changeable_flags
df_clear_flags (enum df_changeable_flags changeable_flags)
{
  enum df_changeable_flags old_flags = df->changeable_flags;
  df->changeable_flags &= ~changeable_flags;
  return old_flags;
}
/* Set the blocks that are to be considered for analysis.  If this is
   not called or is called with null, the entire function is
   analyzed.  */

void
df_set_blocks (bitmap blocks)
508 bitmap_print (dump_file
, blocks
, "setting blocks to analyze ", "\n");
509 if (df
->blocks_to_analyze
)
511 /* This block is called to change the focus from one subset
514 bitmap diff
= BITMAP_ALLOC (&df_bitmap_obstack
);
515 bitmap_and_compl (diff
, df
->blocks_to_analyze
, blocks
);
516 for (p
= 0; p
< df
->num_problems_defined
; p
++)
518 struct dataflow
*dflow
= df
->problems_in_order
[p
];
519 if (dflow
->optional_p
&& dflow
->problem
->reset_fun
)
520 dflow
->problem
->reset_fun (df
->blocks_to_analyze
);
521 else if (dflow
->problem
->free_blocks_on_set_blocks
)
524 unsigned int bb_index
;
526 EXECUTE_IF_SET_IN_BITMAP (diff
, 0, bb_index
, bi
)
528 basic_block bb
= BASIC_BLOCK (bb_index
);
531 void *bb_info
= df_get_bb_info (dflow
, bb_index
);
534 dflow
->problem
->free_bb_fun (bb
, bb_info
);
535 df_set_bb_info (dflow
, bb_index
, NULL
);
546 /* This block of code is executed to change the focus from
547 the entire function to a subset. */
548 bitmap blocks_to_reset
= NULL
;
550 for (p
= 0; p
< df
->num_problems_defined
; p
++)
552 struct dataflow
*dflow
= df
->problems_in_order
[p
];
553 if (dflow
->optional_p
&& dflow
->problem
->reset_fun
)
555 if (!blocks_to_reset
)
559 BITMAP_ALLOC (&df_bitmap_obstack
);
562 bitmap_set_bit (blocks_to_reset
, bb
->index
);
565 dflow
->problem
->reset_fun (blocks_to_reset
);
569 BITMAP_FREE (blocks_to_reset
);
571 df
->blocks_to_analyze
= BITMAP_ALLOC (&df_bitmap_obstack
);
573 bitmap_copy (df
->blocks_to_analyze
, blocks
);
574 df
->analyze_subset
= true;
578 /* This block is executed to reset the focus to the entire
581 fprintf (dump_file
, "clearing blocks_to_analyze\n");
582 if (df
->blocks_to_analyze
)
584 BITMAP_FREE (df
->blocks_to_analyze
);
585 df
->blocks_to_analyze
= NULL
;
587 df
->analyze_subset
= false;
590 /* Setting the blocks causes the refs to be unorganized since only
591 the refs in the blocks are seen. */
592 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE
);
593 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE
);
594 df_mark_solutions_dirty ();
598 /* Delete a DFLOW problem (and any problems that depend on this
602 df_remove_problem (struct dataflow
*dflow
)
604 struct df_problem
*problem
;
610 problem
= dflow
->problem
;
611 gcc_assert (problem
->remove_problem_fun
);
613 /* Delete any problems that depended on this problem first. */
614 for (i
= 0; i
< df
->num_problems_defined
; i
++)
615 if (df
->problems_in_order
[i
]->problem
->dependent_problem
== problem
)
616 df_remove_problem (df
->problems_in_order
[i
]);
618 /* Now remove this problem. */
619 for (i
= 0; i
< df
->num_problems_defined
; i
++)
620 if (df
->problems_in_order
[i
] == dflow
)
623 for (j
= i
+ 1; j
< df
->num_problems_defined
; j
++)
624 df
->problems_in_order
[j
-1] = df
->problems_in_order
[j
];
625 df
->problems_in_order
[j
-1] = NULL
;
626 df
->num_problems_defined
--;
630 (problem
->remove_problem_fun
) ();
631 df
->problems_by_index
[problem
->id
] = NULL
;
635 /* Remove all of the problems that are not permanent. Scanning, LR
636 and (at -O2 or higher) LIVE are permanent, the rest are removable.
637 Also clear all of the changeable_flags. */
640 df_finish_pass (bool verify ATTRIBUTE_UNUSED
)
645 #ifdef ENABLE_DF_CHECKING
646 enum df_changeable_flags saved_flags
;
652 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE
);
653 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE
);
655 #ifdef ENABLE_DF_CHECKING
656 saved_flags
= df
->changeable_flags
;
659 for (i
= 0; i
< df
->num_problems_defined
; i
++)
661 struct dataflow
*dflow
= df
->problems_in_order
[i
];
662 struct df_problem
*problem
= dflow
->problem
;
664 if (dflow
->optional_p
)
666 gcc_assert (problem
->remove_problem_fun
);
667 (problem
->remove_problem_fun
) ();
668 df
->problems_in_order
[i
] = NULL
;
669 df
->problems_by_index
[problem
->id
] = NULL
;
673 df
->num_problems_defined
-= removed
;
675 /* Clear all of the flags. */
676 df
->changeable_flags
= 0;
677 df_process_deferred_rescans ();
679 /* Set the focus back to the whole function. */
680 if (df
->blocks_to_analyze
)
682 BITMAP_FREE (df
->blocks_to_analyze
);
683 df
->blocks_to_analyze
= NULL
;
684 df_mark_solutions_dirty ();
685 df
->analyze_subset
= false;
688 #ifdef ENABLE_DF_CHECKING
689 /* Verification will fail in DF_NO_INSN_RESCAN. */
690 if (!(saved_flags
& DF_NO_INSN_RESCAN
))
692 df_lr_verify_transfer_functions ();
694 df_live_verify_transfer_functions ();
702 #ifdef ENABLE_CHECKING
704 df
->changeable_flags
|= DF_VERIFY_SCHEDULED
;
709 /* Set up the dataflow instance for the entire back end. */
712 rest_of_handle_df_initialize (void)
715 df
= XCNEW (struct df
);
716 df
->changeable_flags
= 0;
718 bitmap_obstack_initialize (&df_bitmap_obstack
);
720 /* Set this to a conservative value. Stack_ptr_mod will compute it
722 current_function_sp_is_unchanging
= 0;
724 df_scan_add_problem ();
725 df_scan_alloc (NULL
);
727 /* These three problems are permanent. */
728 df_lr_add_problem ();
730 df_live_add_problem ();
732 df
->postorder
= XNEWVEC (int, last_basic_block
);
733 df
->postorder_inverted
= XNEWVEC (int, last_basic_block
);
734 df
->n_blocks
= post_order_compute (df
->postorder
, true, true);
735 df
->n_blocks_inverted
= inverted_post_order_compute (df
->postorder_inverted
);
736 gcc_assert (df
->n_blocks
== df
->n_blocks_inverted
);
738 df
->hard_regs_live_count
= XNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER
);
739 memset (df
->hard_regs_live_count
, 0,
740 sizeof (unsigned int) * FIRST_PSEUDO_REGISTER
);
743 /* After reload, some ports add certain bits to regs_ever_live so
744 this cannot be reset. */
745 df_compute_regs_ever_live (true);
747 df_compute_regs_ever_live (false);
759 struct rtl_opt_pass pass_df_initialize_opt
=
765 rest_of_handle_df_initialize
, /* execute */
768 0, /* static_pass_number */
770 0, /* properties_required */
771 0, /* properties_provided */
772 0, /* properties_destroyed */
773 0, /* todo_flags_start */
774 0 /* todo_flags_finish */
782 return optimize
== 0;
786 struct rtl_opt_pass pass_df_initialize_no_opt
=
791 gate_no_opt
, /* gate */
792 rest_of_handle_df_initialize
, /* execute */
795 0, /* static_pass_number */
797 0, /* properties_required */
798 0, /* properties_provided */
799 0, /* properties_destroyed */
800 0, /* todo_flags_start */
801 0 /* todo_flags_finish */
806 /* Free all the dataflow info and the DF structure. This should be
807 called from the df_finish macro which also NULLs the parm. */
810 rest_of_handle_df_finish (void)
816 for (i
= 0; i
< df
->num_problems_defined
; i
++)
818 struct dataflow
*dflow
= df
->problems_in_order
[i
];
819 dflow
->problem
->free_fun ();
823 free (df
->postorder
);
824 if (df
->postorder_inverted
)
825 free (df
->postorder_inverted
);
826 free (df
->hard_regs_live_count
);
830 bitmap_obstack_release (&df_bitmap_obstack
);
835 struct rtl_opt_pass pass_df_finish
=
839 "dfinish", /* name */
841 rest_of_handle_df_finish
, /* execute */
844 0, /* static_pass_number */
846 0, /* properties_required */
847 0, /* properties_provided */
848 0, /* properties_destroyed */
849 0, /* todo_flags_start */
850 0 /* todo_flags_finish */
858 /*----------------------------------------------------------------------------
859 The general data flow analysis engine.
860 ----------------------------------------------------------------------------*/
863 /* Helper function for df_worklist_dataflow.
864 Propagate the dataflow forward.
865 Given a BB_INDEX, do the dataflow propagation
866 and set bits on for successors in PENDING
867 if the out set of the dataflow has changed. */
870 df_worklist_propagate_forward (struct dataflow
*dataflow
,
872 unsigned *bbindex_to_postorder
,
878 basic_block bb
= BASIC_BLOCK (bb_index
);
880 /* Calculate <conf_op> of incoming edges. */
881 if (EDGE_COUNT (bb
->preds
) > 0)
882 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
884 if (TEST_BIT (considered
, e
->src
->index
))
885 dataflow
->problem
->con_fun_n (e
);
887 else if (dataflow
->problem
->con_fun_0
)
888 dataflow
->problem
->con_fun_0 (bb
);
890 if (dataflow
->problem
->trans_fun (bb_index
))
892 /* The out set of this block has changed.
893 Propagate to the outgoing blocks. */
894 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
896 unsigned ob_index
= e
->dest
->index
;
898 if (TEST_BIT (considered
, ob_index
))
899 bitmap_set_bit (pending
, bbindex_to_postorder
[ob_index
]);
905 /* Helper function for df_worklist_dataflow.
906 Propagate the dataflow backward. */
909 df_worklist_propagate_backward (struct dataflow
*dataflow
,
911 unsigned *bbindex_to_postorder
,
917 basic_block bb
= BASIC_BLOCK (bb_index
);
919 /* Calculate <conf_op> of incoming edges. */
920 if (EDGE_COUNT (bb
->succs
) > 0)
921 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
923 if (TEST_BIT (considered
, e
->dest
->index
))
924 dataflow
->problem
->con_fun_n (e
);
926 else if (dataflow
->problem
->con_fun_0
)
927 dataflow
->problem
->con_fun_0 (bb
);
929 if (dataflow
->problem
->trans_fun (bb_index
))
931 /* The out set of this block has changed.
932 Propagate to the outgoing blocks. */
933 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
935 unsigned ob_index
= e
->src
->index
;
937 if (TEST_BIT (considered
, ob_index
))
938 bitmap_set_bit (pending
, bbindex_to_postorder
[ob_index
]);
945 /* This will free "pending". */
947 df_worklist_dataflow_overeager (struct dataflow
*dataflow
,
950 int *blocks_in_postorder
,
951 unsigned *bbindex_to_postorder
)
953 enum df_flow_dir dir
= dataflow
->problem
->dir
;
956 while (!bitmap_empty_p (pending
))
962 index
= bitmap_first_set_bit (pending
);
963 bitmap_clear_bit (pending
, index
);
965 bb_index
= blocks_in_postorder
[index
];
967 if (dir
== DF_FORWARD
)
968 df_worklist_propagate_forward (dataflow
, bb_index
,
969 bbindex_to_postorder
,
970 pending
, considered
);
972 df_worklist_propagate_backward (dataflow
, bb_index
,
973 bbindex_to_postorder
,
974 pending
, considered
);
977 BITMAP_FREE (pending
);
979 /* Dump statistics. */
981 fprintf (dump_file
, "df_worklist_dataflow_overeager:"
982 "n_basic_blocks %d n_edges %d"
983 " count %d (%5.2g)\n",
984 n_basic_blocks
, n_edges
,
985 count
, count
/ (float)n_basic_blocks
);
989 df_worklist_dataflow_doublequeue (struct dataflow
*dataflow
,
992 int *blocks_in_postorder
,
993 unsigned *bbindex_to_postorder
)
995 enum df_flow_dir dir
= dataflow
->problem
->dir
;
997 bitmap worklist
= BITMAP_ALLOC (&df_bitmap_obstack
);
999 /* Double-queueing. Worklist is for the current iteration,
1000 and pending is for the next. */
1001 while (!bitmap_empty_p (pending
))
1003 /* Swap pending and worklist. */
1004 bitmap temp
= worklist
;
1014 index
= bitmap_first_set_bit (worklist
);
1015 bitmap_clear_bit (worklist
, index
);
1017 bb_index
= blocks_in_postorder
[index
];
1019 if (dir
== DF_FORWARD
)
1020 df_worklist_propagate_forward (dataflow
, bb_index
,
1021 bbindex_to_postorder
,
1022 pending
, considered
);
1024 df_worklist_propagate_backward (dataflow
, bb_index
,
1025 bbindex_to_postorder
,
1026 pending
, considered
);
1028 while (!bitmap_empty_p (worklist
));
1031 BITMAP_FREE (worklist
);
1032 BITMAP_FREE (pending
);
1034 /* Dump statistics. */
1036 fprintf (dump_file
, "df_worklist_dataflow_doublequeue:"
1037 "n_basic_blocks %d n_edges %d"
1038 " count %d (%5.2g)\n",
1039 n_basic_blocks
, n_edges
,
1040 dcount
, dcount
/ (float)n_basic_blocks
);
/* Worklist-based dataflow solver.  It uses an sbitmap as the worklist,
   with the n-th bit representing the n-th block in reverse postorder.
   This is the so-called over-eager algorithm, which propagates changes
   on demand.  It may visit blocks more often than the plain iterative
   method does if there are deeply nested loops, but it works better
   than the iterative algorithm for CFGs with no nested loops.  In
   practice, measurement shows that the worklist algorithm beats the
   iterative algorithm by some margin overall.
   Note that this is slightly different from the traditional textbook
   worklist solver, in that the worklist is effectively sorted by
   reverse postorder.  For CFGs with no nested loops, this is optimal.

   The over-eager algorithm, while it works well for typical inputs,
   can degenerate into excessive iteration for CFGs with deep loop
   nests and unstructured loops.  To cap the excessive iteration in
   such cases, we switch to double-queueing when the original
   algorithm appears to be iterating excessively.  */

void
df_worklist_dataflow (struct dataflow *dataflow,
                      bitmap blocks_to_consider,
                      int *blocks_in_postorder,
                      int n_blocks)
{
  bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
1070 sbitmap considered
= sbitmap_alloc (last_basic_block
);
1072 unsigned int *bbindex_to_postorder
;
1075 enum df_flow_dir dir
= dataflow
->problem
->dir
;
1077 gcc_assert (dir
!= DF_NONE
);
1079 /* BBINDEX_TO_POSTORDER maps the bb->index to the reverse postorder. */
1080 bbindex_to_postorder
=
1081 (unsigned int *)xmalloc (last_basic_block
* sizeof (unsigned int));
1083 /* Initialize the array to an out-of-bound value. */
1084 for (i
= 0; i
< last_basic_block
; i
++)
1085 bbindex_to_postorder
[i
] = last_basic_block
;
1087 /* Initialize the considered map. */
1088 sbitmap_zero (considered
);
1089 EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider
, 0, index
, bi
)
1091 SET_BIT (considered
, index
);
1094 /* Initialize the mapping of block index to postorder. */
1095 for (i
= 0; i
< n_blocks
; i
++)
1097 bbindex_to_postorder
[blocks_in_postorder
[i
]] = i
;
1098 /* Add all blocks to the worklist. */
1099 bitmap_set_bit (pending
, i
);
1102 /* Initialize the problem. */
1103 if (dataflow
->problem
->init_fun
)
1104 dataflow
->problem
->init_fun (blocks_to_consider
);
1106 /* Solve it. Determine the solving algorithm
1107 based on a simple heuristic. */
1108 if (n_edges
> PARAM_VALUE (PARAM_DF_DOUBLE_QUEUE_THRESHOLD_FACTOR
)
1111 /* High average connectivity, meaning dense graph
1112 with more likely deep nested loops
1113 or unstructured loops. */
1114 df_worklist_dataflow_doublequeue (dataflow
, pending
, considered
,
1115 blocks_in_postorder
,
1116 bbindex_to_postorder
);
1120 /* Most inputs fall into this case
1121 with relatively flat or structured CFG. */
1122 df_worklist_dataflow_overeager (dataflow
, pending
, considered
,
1123 blocks_in_postorder
,
1124 bbindex_to_postorder
);
1127 sbitmap_free (considered
);
1128 free (bbindex_to_postorder
);
/* Remove the entries not in BLOCKS from the LIST of length LEN, preserving
   the order of the remaining entries.  Returns the length of the resulting
   list.  */

static unsigned
df_prune_to_subcfg (int list[], unsigned len, bitmap blocks)
{
  unsigned act, last;

  for (act = 0, last = 0; act < len; act++)
    if (bitmap_bit_p (blocks, list[act]))
      list[last++] = list[act];

  return last;
}
/* Execute dataflow analysis on a single dataflow problem.

   BLOCKS_TO_CONSIDER are the blocks whose solution can either be
   examined or will be computed.  For calls from DF_ANALYZE, this is
   the set of blocks that has been passed to DF_SET_BLOCKS.  */

void
df_analyze_problem (struct dataflow *dflow,
                    bitmap blocks_to_consider,
                    int *postorder, int n_blocks)
{
  timevar_push (dflow->problem->tv_id);

#ifdef ENABLE_DF_CHECKING
  if (dflow->problem->verify_start_fun)
    dflow->problem->verify_start_fun ();
#endif

  /* (Re)Allocate the datastructures necessary to solve the problem.  */
  if (dflow->problem->alloc_fun)
    dflow->problem->alloc_fun (blocks_to_consider);

  /* Set up the problem and compute the local information.  */
  if (dflow->problem->local_compute_fun)
    dflow->problem->local_compute_fun (blocks_to_consider);

  /* Solve the equations.  */
  if (dflow->problem->dataflow_fun)
    dflow->problem->dataflow_fun (dflow, blocks_to_consider,
                                  postorder, n_blocks);

  /* Massage the solution.  */
  if (dflow->problem->finalize_fun)
    dflow->problem->finalize_fun (blocks_to_consider);

#ifdef ENABLE_DF_CHECKING
  if (dflow->problem->verify_end_fun)
    dflow->problem->verify_end_fun ();
#endif

  timevar_pop (dflow->problem->tv_id);

  dflow->computed = true;
}
1196 /* Analyze dataflow info for the basic blocks specified by the bitmap
1197 BLOCKS, or for the whole CFG if BLOCKS is zero. */
1202 bitmap current_all_blocks
= BITMAP_ALLOC (&df_bitmap_obstack
);
1207 free (df
->postorder
);
1208 if (df
->postorder_inverted
)
1209 free (df
->postorder_inverted
);
1210 df
->postorder
= XNEWVEC (int, last_basic_block
);
1211 df
->postorder_inverted
= XNEWVEC (int, last_basic_block
);
1212 df
->n_blocks
= post_order_compute (df
->postorder
, true, true);
1213 df
->n_blocks_inverted
= inverted_post_order_compute (df
->postorder_inverted
);
1215 /* These should be the same. */
1216 gcc_assert (df
->n_blocks
== df
->n_blocks_inverted
);
1218 /* We need to do this before the df_verify_all because this is
1219 not kept incrementally up to date. */
1220 df_compute_regs_ever_live (false);
1221 df_process_deferred_rescans ();
1224 fprintf (dump_file
, "df_analyze called\n");
1226 #ifndef ENABLE_DF_CHECKING
1227 if (df
->changeable_flags
& DF_VERIFY_SCHEDULED
)
1231 for (i
= 0; i
< df
->n_blocks
; i
++)
1232 bitmap_set_bit (current_all_blocks
, df
->postorder
[i
]);
1234 #ifdef ENABLE_CHECKING
1235 /* Verify that POSTORDER_INVERTED only contains blocks reachable from
1237 for (i
= 0; i
< df
->n_blocks_inverted
; i
++)
1238 gcc_assert (bitmap_bit_p (current_all_blocks
, df
->postorder_inverted
[i
]));
1241 /* Make sure that we have pruned any unreachable blocks from these
1243 if (df
->analyze_subset
)
1246 bitmap_and_into (df
->blocks_to_analyze
, current_all_blocks
);
1247 df
->n_blocks
= df_prune_to_subcfg (df
->postorder
,
1248 df
->n_blocks
, df
->blocks_to_analyze
);
1249 df
->n_blocks_inverted
= df_prune_to_subcfg (df
->postorder_inverted
,
1250 df
->n_blocks_inverted
,
1251 df
->blocks_to_analyze
);
1252 BITMAP_FREE (current_all_blocks
);
1257 df
->blocks_to_analyze
= current_all_blocks
;
1258 current_all_blocks
= NULL
;
1261 /* Skip over the DF_SCAN problem. */
1262 for (i
= 1; i
< df
->num_problems_defined
; i
++)
1264 struct dataflow
*dflow
= df
->problems_in_order
[i
];
1265 if (dflow
->solutions_dirty
)
1267 if (dflow
->problem
->dir
== DF_FORWARD
)
1268 df_analyze_problem (dflow
,
1269 df
->blocks_to_analyze
,
1270 df
->postorder_inverted
,
1271 df
->n_blocks_inverted
);
1273 df_analyze_problem (dflow
,
1274 df
->blocks_to_analyze
,
1282 BITMAP_FREE (df
->blocks_to_analyze
);
1283 df
->blocks_to_analyze
= NULL
;
1287 df_set_clean_cfg ();
/* Return the number of basic blocks from the last call to df_analyze.  */

int
df_get_n_blocks (enum df_flow_dir dir)
{
  gcc_assert (dir != DF_NONE);

  if (dir == DF_FORWARD)
    {
      gcc_assert (df->postorder_inverted);
      return df->n_blocks_inverted;
    }

  gcc_assert (df->postorder);
  return df->n_blocks;
}


/* Return a pointer to the array of basic blocks in the reverse postorder.
   Depending on the direction of the dataflow problem,
   it returns either the usual reverse postorder array
   or the reverse postorder of the inverted traversal.  */

int *
df_get_postorder (enum df_flow_dir dir)
{
  gcc_assert (dir != DF_NONE);

  if (dir == DF_FORWARD)
    {
      gcc_assert (df->postorder_inverted);
      return df->postorder_inverted;
    }

  gcc_assert (df->postorder);
  return df->postorder;
}
static struct df_problem user_problem;
static struct dataflow user_dflow;

/* Interface for calling iterative dataflow with user defined
   confluence and transfer functions.  All that is necessary is to
   supply DIR, a direction, INIT_FUN, an initialization function (or
   NULL), CONF_FUN_0, a confluence function for blocks with no logical
   preds (or NULL), CONF_FUN_N, the normal confluence function,
   TRANS_FUN, the basic block transfer function, and BLOCKS, the set
   of blocks to examine, POSTORDER the blocks in postorder, and
   N_BLOCKS, the number of blocks in POSTORDER.  */

void
df_simple_dataflow (enum df_flow_dir dir,
                    df_init_function init_fun,
                    df_confluence_function_0 con_fun_0,
                    df_confluence_function_n con_fun_n,
                    df_transfer_function trans_fun,
                    bitmap blocks, int *postorder, int n_blocks)
{
  memset (&user_problem, 0, sizeof (struct df_problem));
  user_problem.dir = dir;
  user_problem.init_fun = init_fun;
  user_problem.con_fun_0 = con_fun_0;
  user_problem.con_fun_n = con_fun_n;
  user_problem.trans_fun = trans_fun;
  user_dflow.problem = &user_problem;
  df_worklist_dataflow (&user_dflow, blocks, postorder, n_blocks);
}
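/* As an illustration of df_simple_dataflow, a client implementing a
   simple forward union problem could supply functions along the
   following lines.  This is only a sketch: my_in, my_out,
   my_con_fun_n and my_trans_fun are hypothetical and not part of df;
   the signatures follow the call sites in df_worklist_propagate_*
   above.

      static void
      my_con_fun_n (edge e)
      {
        bitmap_ior_into (my_in[e->dest->index], my_out[e->src->index]);
      }

      static bool
      my_trans_fun (int bb_index)
      {
        return bitmap_ior_into (my_out[bb_index], my_in[bb_index]);
      }

      ...

      df_simple_dataflow (DF_FORWARD, NULL, NULL, my_con_fun_n,
                          my_trans_fun, blocks, postorder, n_blocks);
*/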
1359 /*----------------------------------------------------------------------------
1360 Functions to support limited incremental change.
1361 ----------------------------------------------------------------------------*/
/* Get basic block info.  */

static void *
df_get_bb_info (struct dataflow *dflow, unsigned int index)
{
  if (dflow->block_info == NULL)
    return NULL;
  if (index >= dflow->block_info_size)
    return NULL;
  return (struct df_scan_bb_info *) dflow->block_info[index];
}


/* Set basic block info.  */

static void
df_set_bb_info (struct dataflow *dflow, unsigned int index,
                void *bb_info)
{
  gcc_assert (dflow->block_info);
  dflow->block_info[index] = bb_info;
}
1388 /* Mark the solutions as being out of date. */
1391 df_mark_solutions_dirty (void)
1396 for (p
= 1; p
< df
->num_problems_defined
; p
++)
1397 df
->problems_in_order
[p
]->solutions_dirty
= true;
/* Return true if BB needs its transfer functions recomputed.  */
1405 df_get_bb_dirty (basic_block bb
)
1408 return bitmap_bit_p (df_live
->out_of_date_transfer_functions
, bb
->index
);
/* Mark BB as needing its transfer functions as being out of
   date.  */
1418 df_set_bb_dirty (basic_block bb
)
1423 for (p
= 1; p
< df
->num_problems_defined
; p
++)
1425 struct dataflow
*dflow
= df
->problems_in_order
[p
];
1426 if (dflow
->out_of_date_transfer_functions
)
1427 bitmap_set_bit (dflow
->out_of_date_transfer_functions
, bb
->index
);
1429 df_mark_solutions_dirty ();
1434 /* Clear the dirty bits. This is called from places that delete
1437 df_clear_bb_dirty (basic_block bb
)
1440 for (p
= 1; p
< df
->num_problems_defined
; p
++)
1442 struct dataflow
*dflow
= df
->problems_in_order
[p
];
1443 if (dflow
->out_of_date_transfer_functions
)
1444 bitmap_clear_bit (dflow
->out_of_date_transfer_functions
, bb
->index
);
/* Called from the rtl_compact_blocks to reorganize the problems basic
   block info.  */

void
df_compact_blocks (void)
1455 void **problem_temps
;
1456 int size
= last_basic_block
* sizeof (void *);
1457 bitmap tmp
= BITMAP_ALLOC (&df_bitmap_obstack
);
1458 problem_temps
= XNEWVAR (void *, size
);
1460 for (p
= 0; p
< df
->num_problems_defined
; p
++)
1462 struct dataflow
*dflow
= df
->problems_in_order
[p
];
1464 /* Need to reorganize the out_of_date_transfer_functions for the
1466 if (dflow
->out_of_date_transfer_functions
)
1468 bitmap_copy (tmp
, dflow
->out_of_date_transfer_functions
);
1469 bitmap_clear (dflow
->out_of_date_transfer_functions
);
1470 if (bitmap_bit_p (tmp
, ENTRY_BLOCK
))
1471 bitmap_set_bit (dflow
->out_of_date_transfer_functions
, ENTRY_BLOCK
);
1472 if (bitmap_bit_p (tmp
, EXIT_BLOCK
))
1473 bitmap_set_bit (dflow
->out_of_date_transfer_functions
, EXIT_BLOCK
);
1475 i
= NUM_FIXED_BLOCKS
;
1478 if (bitmap_bit_p (tmp
, bb
->index
))
1479 bitmap_set_bit (dflow
->out_of_date_transfer_functions
, i
);
1484 /* Now shuffle the block info for the problem. */
1485 if (dflow
->problem
->free_bb_fun
)
1487 df_grow_bb_info (dflow
);
1488 memcpy (problem_temps
, dflow
->block_info
, size
);
1490 /* Copy the bb info from the problem tmps to the proper
1491 place in the block_info vector. Null out the copied
1492 item. The entry and exit blocks never move. */
1493 i
= NUM_FIXED_BLOCKS
;
1496 df_set_bb_info (dflow
, i
, problem_temps
[bb
->index
]);
1497 problem_temps
[bb
->index
] = NULL
;
1500 memset (dflow
->block_info
+ i
, 0,
1501 (last_basic_block
- i
) *sizeof (void *));
1503 /* Free any block infos that were not copied (and NULLed).
1504 These are from orphaned blocks. */
1505 for (i
= NUM_FIXED_BLOCKS
; i
< last_basic_block
; i
++)
1507 basic_block bb
= BASIC_BLOCK (i
);
1508 if (problem_temps
[i
] && bb
)
1509 dflow
->problem
->free_bb_fun
1510 (bb
, problem_temps
[i
]);
1515 /* Shuffle the bits in the basic_block indexed arrays. */
1517 if (df
->blocks_to_analyze
)
1519 if (bitmap_bit_p (tmp
, ENTRY_BLOCK
))
1520 bitmap_set_bit (df
->blocks_to_analyze
, ENTRY_BLOCK
);
1521 if (bitmap_bit_p (tmp
, EXIT_BLOCK
))
1522 bitmap_set_bit (df
->blocks_to_analyze
, EXIT_BLOCK
);
1523 bitmap_copy (tmp
, df
->blocks_to_analyze
);
1524 bitmap_clear (df
->blocks_to_analyze
);
1525 i
= NUM_FIXED_BLOCKS
;
1528 if (bitmap_bit_p (tmp
, bb
->index
))
1529 bitmap_set_bit (df
->blocks_to_analyze
, i
);
1536 free (problem_temps
);
1538 i
= NUM_FIXED_BLOCKS
;
1541 SET_BASIC_BLOCK (i
, bb
);
1546 gcc_assert (i
== n_basic_blocks
);
1548 for (; i
< last_basic_block
; i
++)
1549 SET_BASIC_BLOCK (i
, NULL
);
1552 if (!df_lr
->solutions_dirty
)
1553 df_set_clean_cfg ();
1558 /* Shove NEW_BLOCK in at OLD_INDEX. Called from ifcvt to hack a
1559 block. There is no excuse for people to do this kind of thing. */
1562 df_bb_replace (int old_index
, basic_block new_block
)
1564 int new_block_index
= new_block
->index
;
1568 fprintf (dump_file
, "shoving block %d into %d\n", new_block_index
, old_index
);
1571 gcc_assert (BASIC_BLOCK (old_index
) == NULL
);
1573 for (p
= 0; p
< df
->num_problems_defined
; p
++)
1575 struct dataflow
*dflow
= df
->problems_in_order
[p
];
1576 if (dflow
->block_info
)
1578 df_grow_bb_info (dflow
);
1579 gcc_assert (df_get_bb_info (dflow
, old_index
) == NULL
);
1580 df_set_bb_info (dflow
, old_index
,
1581 df_get_bb_info (dflow
, new_block_index
));
1585 df_clear_bb_dirty (new_block
);
1586 SET_BASIC_BLOCK (old_index
, new_block
);
1587 new_block
->index
= old_index
;
1588 df_set_bb_dirty (BASIC_BLOCK (old_index
));
1589 SET_BASIC_BLOCK (new_block_index
, NULL
);
1593 /* Free all of the per basic block dataflow from all of the problems.
1594 This is typically called before a basic block is deleted and the
1595 problem will be reanalyzed. */
1598 df_bb_delete (int bb_index
)
1600 basic_block bb
= BASIC_BLOCK (bb_index
);
1606 for (i
= 0; i
< df
->num_problems_defined
; i
++)
1608 struct dataflow
*dflow
= df
->problems_in_order
[i
];
1609 if (dflow
->problem
->free_bb_fun
)
1611 void *bb_info
= df_get_bb_info (dflow
, bb_index
);
1614 dflow
->problem
->free_bb_fun (bb
, bb_info
);
1615 df_set_bb_info (dflow
, bb_index
, NULL
);
1619 df_clear_bb_dirty (bb
);
1620 df_mark_solutions_dirty ();
1624 /* Verify that there is a place for everything and everything is in
1625 its place. This is too expensive to run after every pass in the
1626 mainline. However this is an excellent debugging tool if the
1627 dataflow information is not being updated properly. You can just
1628 sprinkle calls in until you find the place that is changing an
underlying structure without calling the proper updating
routine.  */
1636 #ifdef ENABLE_DF_CHECKING
1637 df_lr_verify_transfer_functions ();
1639 df_live_verify_transfer_functions ();
/* Compute an array of ints that describes the cfg.  This can be used
   to discover places where the cfg is modified but the appropriate
   calls to keep df informed have not been made.  The internals of
   this are unexciting, the key is that two instances of this can be
   compared to see if any changes have been made to the cfg.  */
1652 df_compute_cfg_image (void)
1655 int size
= 2 + (2 * n_basic_blocks
);
1661 size
+= EDGE_COUNT (bb
->succs
);
1664 map
= XNEWVEC (int, size
);
1672 map
[i
++] = bb
->index
;
1673 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
1674 map
[i
++] = e
->dest
->index
;
1681 static int *saved_cfg
= NULL
;
/* This function compares the saved version of the cfg with the
   current cfg and aborts if the two differ.  The function
   silently returns if the cfg has been marked as dirty or the two
   are the same.  */

static void
df_check_cfg_clean (void)
1697 if (df_lr
->solutions_dirty
)
1700 if (saved_cfg
== NULL
)
1703 new_map
= df_compute_cfg_image ();
1704 gcc_assert (memcmp (saved_cfg
, new_map
, saved_cfg
[0] * sizeof (int)) == 0);
/* This function builds a cfg fingerprint and squirrels it away in
   saved_cfg.  */

static void
df_set_clean_cfg (void)
1717 saved_cfg
= df_compute_cfg_image ();
1720 #endif /* DF_DEBUG_CFG */
1721 /*----------------------------------------------------------------------------
1722 PUBLIC INTERFACES TO QUERY INFORMATION.
1723 ----------------------------------------------------------------------------*/
1726 /* Return first def of REGNO within BB. */
1729 df_bb_regno_first_def_find (basic_block bb
, unsigned int regno
)
1732 struct df_ref
**def_rec
;
1735 FOR_BB_INSNS (bb
, insn
)
1740 uid
= INSN_UID (insn
);
1741 for (def_rec
= DF_INSN_UID_DEFS (uid
); *def_rec
; def_rec
++)
1743 struct df_ref
*def
= *def_rec
;
1744 if (DF_REF_REGNO (def
) == regno
)
1752 /* Return last def of REGNO within BB. */
1755 df_bb_regno_last_def_find (basic_block bb
, unsigned int regno
)
1758 struct df_ref
**def_rec
;
1761 FOR_BB_INSNS_REVERSE (bb
, insn
)
1766 uid
= INSN_UID (insn
);
1767 for (def_rec
= DF_INSN_UID_DEFS (uid
); *def_rec
; def_rec
++)
1769 struct df_ref
*def
= *def_rec
;
1770 if (DF_REF_REGNO (def
) == regno
)
/* Finds the reference corresponding to the definition of REG in INSN.
   DF is the dataflow object.  */

struct df_ref *
df_find_def (rtx insn, rtx reg)
{
  unsigned int uid;
  struct df_ref **def_rec;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  gcc_assert (REG_P (reg));

  uid = INSN_UID (insn);
  for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
    {
      struct df_ref *def = *def_rec;
      if (rtx_equal_p (DF_REF_REAL_REG (def), reg))
        return def;
    }

  return NULL;
}


/* Return true if REG is defined in INSN, false otherwise.  */

bool
df_reg_defined (rtx insn, rtx reg)
{
  return df_find_def (insn, reg) != NULL;
}


/* Finds the reference corresponding to the use of REG in INSN.
   DF is the dataflow object.  */

struct df_ref *
df_find_use (rtx insn, rtx reg)
{
  unsigned int uid;
  struct df_ref **use_rec;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  gcc_assert (REG_P (reg));

  uid = INSN_UID (insn);
  for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
    {
      struct df_ref *use = *use_rec;
      if (rtx_equal_p (DF_REF_REAL_REG (use), reg))
        return use;
    }
  if (df->changeable_flags & DF_EQ_NOTES)
    for (use_rec = DF_INSN_UID_EQ_USES (uid); *use_rec; use_rec++)
      {
        struct df_ref *use = *use_rec;
        if (rtx_equal_p (DF_REF_REAL_REG (use), reg))
          return use;
      }

  return NULL;
}


/* Return true if REG is referenced in INSN, false otherwise.  */

bool
df_reg_used (rtx insn, rtx reg)
{
  return df_find_use (insn, reg) != NULL;
}
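/* As an illustration, a pass can use the two predicates above to ask
   whether INSN touches a particular register.  A minimal sketch, using
   the global stack_pointer_rtx (insn is assumed to be in scope):

      if (df_reg_defined (insn, stack_pointer_rtx))
        ... insn writes the stack pointer ...
      else if (df_reg_used (insn, stack_pointer_rtx))
        ... insn reads but does not write the stack pointer ...
*/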
1852 /*----------------------------------------------------------------------------
1853 Debugging and printing functions.
1854 ----------------------------------------------------------------------------*/
1857 /* Write information about registers and basic blocks into FILE.
1858 This is part of making a debugging dump. */
1861 df_print_regset (FILE *file
, bitmap r
)
1867 fputs (" (nil)", file
);
1870 EXECUTE_IF_SET_IN_BITMAP (r
, 0, i
, bi
)
1872 fprintf (file
, " %d", i
);
1873 if (i
< FIRST_PSEUDO_REGISTER
)
1874 fprintf (file
, " [%s]", reg_names
[i
]);
1877 fprintf (file
, "\n");
1881 /* Write information about registers and basic blocks into FILE. The
1882 bitmap is in the form used by df_byte_lr. This is part of making a
1886 df_print_byte_regset (FILE *file
, bitmap r
)
1888 unsigned int max_reg
= max_reg_num ();
1892 fputs (" (nil)", file
);
1896 for (i
= 0; i
< max_reg
; i
++)
1898 unsigned int first
= df_byte_lr_get_regno_start (i
);
1899 unsigned int len
= df_byte_lr_get_regno_len (i
);
1906 EXECUTE_IF_SET_IN_BITMAP (r
, first
, j
, bi
)
1908 found
= j
< first
+ len
;
1913 const char * sep
= "";
1914 fprintf (file
, " %d", i
);
1915 if (i
< FIRST_PSEUDO_REGISTER
)
1916 fprintf (file
, " [%s]", reg_names
[i
]);
1917 fprintf (file
, "(");
1918 EXECUTE_IF_SET_IN_BITMAP (r
, first
, j
, bi
)
1920 if (j
> first
+ len
- 1)
1922 fprintf (file
, "%s%d", sep
, j
-first
);
1925 fprintf (file
, ")");
1930 if (bitmap_bit_p (r
, first
))
1932 fprintf (file
, " %d", i
);
1933 if (i
< FIRST_PSEUDO_REGISTER
)
1934 fprintf (file
, " [%s]", reg_names
[i
]);
1940 fprintf (file
, "\n");
1944 /* Dump dataflow info. */
1947 df_dump (FILE *file
)
1950 df_dump_start (file
);
1954 df_print_bb_index (bb
, file
);
1955 df_dump_top (bb
, file
);
1956 df_dump_bottom (bb
, file
);
1959 fprintf (file
, "\n");
1963 /* Dump dataflow info for df->blocks_to_analyze. */
1966 df_dump_region (FILE *file
)
1968 if (df
->blocks_to_analyze
)
1971 unsigned int bb_index
;
1973 fprintf (file
, "\n\nstarting region dump\n");
1974 df_dump_start (file
);
1976 EXECUTE_IF_SET_IN_BITMAP (df
->blocks_to_analyze
, 0, bb_index
, bi
)
1978 basic_block bb
= BASIC_BLOCK (bb_index
);
1980 df_print_bb_index (bb
, file
);
1981 df_dump_top (bb
, file
);
1982 df_dump_bottom (bb
, file
);
1984 fprintf (file
, "\n");
1991 /* Dump the introductory information for each problem defined. */
1994 df_dump_start (FILE *file
)
2001 fprintf (file
, "\n\n%s\n", current_function_name ());
2002 fprintf (file
, "\nDataflow summary:\n");
2003 if (df
->blocks_to_analyze
)
2004 fprintf (file
, "def_info->table_size = %d, use_info->table_size = %d\n",
2005 DF_DEFS_TABLE_SIZE (), DF_USES_TABLE_SIZE ());
2007 for (i
= 0; i
< df
->num_problems_defined
; i
++)
2009 struct dataflow
*dflow
= df
->problems_in_order
[i
];
2010 if (dflow
->computed
)
2012 df_dump_problem_function fun
= dflow
->problem
->dump_start_fun
;
2020 /* Dump the top of the block information for BB. */
2023 df_dump_top (basic_block bb
, FILE *file
)
2030 for (i
= 0; i
< df
->num_problems_defined
; i
++)
2032 struct dataflow
*dflow
= df
->problems_in_order
[i
];
2033 if (dflow
->computed
)
2035 df_dump_bb_problem_function bbfun
= dflow
->problem
->dump_top_fun
;
2043 /* Dump the bottom of the block information for BB. */
2046 df_dump_bottom (basic_block bb
, FILE *file
)
2053 for (i
= 0; i
< df
->num_problems_defined
; i
++)
2055 struct dataflow
*dflow
= df
->problems_in_order
[i
];
2056 if (dflow
->computed
)
2058 df_dump_bb_problem_function bbfun
= dflow
->problem
->dump_bottom_fun
;
2067 df_refs_chain_dump (struct df_ref
**ref_rec
, bool follow_chain
, FILE *file
)
2069 fprintf (file
, "{ ");
2072 struct df_ref
*ref
= *ref_rec
;
2073 fprintf (file
, "%c%d(%d)",
2074 DF_REF_REG_DEF_P (ref
) ? 'd' : (DF_REF_FLAGS (ref
) & DF_REF_IN_NOTE
) ? 'e' : 'u',
2076 DF_REF_REGNO (ref
));
2078 df_chain_dump (DF_REF_CHAIN (ref
), file
);
2081 fprintf (file
, "}");
2085 /* Dump either a ref-def or reg-use chain. */
2088 df_regs_chain_dump (struct df_ref
*ref
, FILE *file
)
2090 fprintf (file
, "{ ");
2093 fprintf (file
, "%c%d(%d) ",
2094 DF_REF_REG_DEF_P (ref
) ? 'd' : 'u',
2096 DF_REF_REGNO (ref
));
2097 ref
= ref
->next_reg
;
2099 fprintf (file
, "}");
2104 df_mws_dump (struct df_mw_hardreg
**mws
, FILE *file
)
2108 fprintf (file
, "mw %c r[%d..%d]\n",
2109 ((*mws
)->type
== DF_REF_REG_DEF
) ? 'd' : 'u',
2110 (*mws
)->start_regno
, (*mws
)->end_regno
);
2117 df_insn_uid_debug (unsigned int uid
,
2118 bool follow_chain
, FILE *file
)
2120 fprintf (file
, "insn %d luid %d",
2121 uid
, DF_INSN_UID_LUID (uid
));
2123 if (DF_INSN_UID_DEFS (uid
))
2125 fprintf (file
, " defs ");
2126 df_refs_chain_dump (DF_INSN_UID_DEFS (uid
), follow_chain
, file
);
2129 if (DF_INSN_UID_USES (uid
))
2131 fprintf (file
, " uses ");
2132 df_refs_chain_dump (DF_INSN_UID_USES (uid
), follow_chain
, file
);
2135 if (DF_INSN_UID_EQ_USES (uid
))
2137 fprintf (file
, " eq uses ");
2138 df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid
), follow_chain
, file
);
2141 if (DF_INSN_UID_MWS (uid
))
2143 fprintf (file
, " mws ");
2144 df_mws_dump (DF_INSN_UID_MWS (uid
), file
);
2146 fprintf (file
, "\n");
2151 df_insn_debug (rtx insn
, bool follow_chain
, FILE *file
)
2153 df_insn_uid_debug (INSN_UID (insn
), follow_chain
, file
);
2157 df_insn_debug_regno (rtx insn
, FILE *file
)
2159 struct df_insn_info
*insn_info
= DF_INSN_INFO_GET (insn
);
2161 fprintf (file
, "insn %d bb %d luid %d defs ",
2162 INSN_UID (insn
), BLOCK_FOR_INSN (insn
)->index
,
2163 DF_INSN_INFO_LUID (insn_info
));
2164 df_refs_chain_dump (DF_INSN_INFO_DEFS (insn_info
), false, file
);
2166 fprintf (file
, " uses ");
2167 df_refs_chain_dump (DF_INSN_INFO_USES (insn_info
), false, file
);
2169 fprintf (file
, " eq_uses ");
2170 df_refs_chain_dump (DF_INSN_INFO_EQ_USES (insn_info
), false, file
);
2171 fprintf (file
, "\n");
2175 df_regno_debug (unsigned int regno
, FILE *file
)
2177 fprintf (file
, "reg %d defs ", regno
);
2178 df_regs_chain_dump (DF_REG_DEF_CHAIN (regno
), file
);
2179 fprintf (file
, " uses ");
2180 df_regs_chain_dump (DF_REG_USE_CHAIN (regno
), file
);
2181 fprintf (file
, " eq_uses ");
2182 df_regs_chain_dump (DF_REG_EQ_USE_CHAIN (regno
), file
);
2183 fprintf (file
, "\n");
2188 df_ref_debug (struct df_ref
*ref
, FILE *file
)
2190 fprintf (file
, "%c%d ",
2191 DF_REF_REG_DEF_P (ref
) ? 'd' : 'u',
2193 fprintf (file
, "reg %d bb %d insn %d flag 0x%x type 0x%x ",
2196 DF_REF_INSN_INFO (ref
) ? INSN_UID (DF_REF_INSN (ref
)) : -1,
2199 if (DF_REF_LOC (ref
))
2200 fprintf (file
, "loc %p(%p) chain ", (void *)DF_REF_LOC (ref
), (void *)*DF_REF_LOC (ref
));
2202 fprintf (file
, "chain ");
2203 df_chain_dump (DF_REF_CHAIN (ref
), file
);
2204 fprintf (file
, "\n");
2207 /* Functions for debugging from GDB. */
2210 debug_df_insn (rtx insn
)
2212 df_insn_debug (insn
, true, stderr
);
2218 debug_df_reg (rtx reg
)
2220 df_regno_debug (REGNO (reg
), stderr
);
2225 debug_df_regno (unsigned int regno
)
2227 df_regno_debug (regno
, stderr
);
2232 debug_df_ref (struct df_ref
*ref
)
2234 df_ref_debug (ref
, stderr
);
2239 debug_df_defno (unsigned int defno
)
2241 df_ref_debug (DF_DEFS_GET (defno
), stderr
);
2246 debug_df_useno (unsigned int defno
)
2248 df_ref_debug (DF_USES_GET (defno
), stderr
);
2253 debug_df_chain (struct df_link
*link
)
2255 df_chain_dump (link
, stderr
);
2256 fputc ('\n', stderr
);