1 /* FIXME: We need to go back and add the warning messages about code
2 moved across setjmp. */
5 /* Scanning of rtl for dataflow analysis.
6 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
7 Free Software Foundation, Inc.
8 Originally contributed by Michael P. Hayes
9 (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
10 Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
11 and Kenneth Zadeck (zadeck@naturalbridge.com).
13 This file is part of GCC.
15 GCC is free software; you can redistribute it and/or modify it under
16 the terms of the GNU General Public License as published by the Free
17 Software Foundation; either version 2, or (at your option) any later
20 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
21 WARRANTY; without even the implied warranty of MERCHANTABILITY or
22 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
25 You should have received a copy of the GNU General Public License
26 along with GCC; see the file COPYING. If not, write to the Free
27 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
33 #include "coretypes.h"
37 #include "insn-config.h"
42 #include "alloc-pool.h"
44 #include "hard-reg-set.h"
45 #include "basic-block.h"
52 #define HAVE_epilogue 0
55 #define HAVE_prologue 0
57 #ifndef HAVE_sibcall_epilogue
58 #define HAVE_sibcall_epilogue 0
62 #define EPILOGUE_USES(REGNO) 0
/* File-scope state for the scanning problem.  NOTE(review): this chunk is
   a lossy extraction -- original lines are missing throughout; code below
   is left byte-identical.  df_all_hard_regs is presumably the set of all
   hard registers -- TODO confirm against the missing comment text.  */
65 /* Indicates where we are in the compilation. */
68 /* The bitmap_obstack is used to hold some static variables that
69 should not be reset after each function is compiled. */
71 static bitmap_obstack persistent_obstack
;
73 /* The set of hard registers in eliminables[i].from. */
75 static HARD_REG_SET elim_reg_set
;
77 /* This is a bitmap copy of regs_invalidated_by_call so that we can
78 easily add it into bitmaps, etc. */
80 bitmap df_invalidated_by_call
= NULL
;
82 /* Initialize ur_in and ur_out as if all hard registers were partially
85 bitmap df_all_hard_regs
= NULL
;
/* Forward declarations of the local scanning routines.  NOTE(review):
   lossy extraction -- some declaration lines (e.g. the tail of
   df_ref_create_structure's prototype) are missing; kept byte-identical.  */
87 static void df_ref_record (struct dataflow
*, rtx
, rtx
*,
88 basic_block
, rtx
, enum df_ref_type
,
89 enum df_ref_flags
, bool record_live
);
90 static void df_def_record_1 (struct dataflow
*, rtx
, basic_block
, rtx
,
91 enum df_ref_flags
, bool record_live
);
92 static void df_defs_record (struct dataflow
*, rtx
, basic_block
, rtx
);
93 static void df_uses_record (struct dataflow
*, rtx
*, enum df_ref_type
,
94 basic_block
, rtx
, enum df_ref_flags
);
96 static void df_insn_refs_record (struct dataflow
*, basic_block
, rtx
);
97 static void df_bb_refs_record (struct dataflow
*, basic_block
);
98 static void df_refs_record (struct dataflow
*, bitmap
);
99 static struct df_ref
*df_ref_create_structure (struct dataflow
*, rtx
, rtx
*,
100 basic_block
, rtx
, enum df_ref_type
,
102 static void df_record_exit_block_uses (struct dataflow
*);
103 static void df_grow_reg_info (struct dataflow
*, struct df_ref_info
*);
104 static void df_grow_ref_info (struct df_ref_info
*, unsigned int);
105 static void df_grow_insn_info (struct df
*);
108 /*----------------------------------------------------------------------------
109 SCANNING DATAFLOW PROBLEM
111 There are several ways in which scanning looks just like the other
112 dataflow problems. It shares the all the mechanisms for local info
113 as well as basic block info. Where it differs is when and how often
114 it gets run. It also has no need for the iterative solver.
115 ----------------------------------------------------------------------------*/
/* NOTE(review): lossy extraction -- the struct's braces and the ref_pool /
   reg_pool member lines are missing here (both members are referenced by
   df_scan_free_internal and df_scan_alloc below); kept byte-identical.  */
117 /* Problem data for the scanning dataflow function. */
118 struct df_scan_problem_data
121 alloc_pool insn_pool
;
125 typedef struct df_scan_bb_info
*df_scan_bb_info_t
;
/* Free the internal scan storage: the def/use regs and refs tables, the
   per-block info array, the hardware_regs_used and exit_block_uses bitmaps,
   and the block/ref/insn/reg alloc pools.  NOTE(review): lossy extraction --
   braces and several lines are missing; code kept byte-identical.  */
128 df_scan_free_internal (struct dataflow
*dflow
)
130 struct df
*df
= dflow
->df
;
131 struct df_scan_problem_data
*problem_data
=
132 (struct df_scan_problem_data
*) dflow
->problem_data
;
134 free (df
->def_info
.regs
);
135 free (df
->def_info
.refs
);
136 memset (&df
->def_info
, 0, (sizeof (struct df_ref_info
)));
138 free (df
->use_info
.regs
);
139 free (df
->use_info
.refs
);
140 memset (&df
->use_info
, 0, (sizeof (struct df_ref_info
)));
146 free (dflow
->block_info
);
147 dflow
->block_info
= NULL
;
148 dflow
->block_info_size
= 0;
150 BITMAP_FREE (df
->hardware_regs_used
);
151 BITMAP_FREE (df
->exit_block_uses
);
153 free_alloc_pool (dflow
->block_pool
);
154 free_alloc_pool (problem_data
->ref_pool
);
155 free_alloc_pool (problem_data
->insn_pool
);
156 free_alloc_pool (problem_data
->reg_pool
);
160 /* Get basic block info. */
/* Return the scanning bb_info for block INDEX; asserts INDEX is in range.
   NOTE(review): lossy extraction -- braces missing; kept byte-identical.  */
162 struct df_scan_bb_info
*
163 df_scan_get_bb_info (struct dataflow
*dflow
, unsigned int index
)
165 gcc_assert (index
< dflow
->block_info_size
);
166 return (struct df_scan_bb_info
*) dflow
->block_info
[index
];
170 /* Set basic block info. */
/* Store BB_INFO as the scanning info for block INDEX; asserts INDEX is in
   range.  NOTE(review): lossy extraction -- the return-type line and braces
   are missing; kept byte-identical.  */
173 df_scan_set_bb_info (struct dataflow
*dflow
, unsigned int index
,
174 struct df_scan_bb_info
*bb_info
)
176 gcc_assert (index
< dflow
->block_info_size
);
177 dflow
->block_info
[index
] = (void *) bb_info
;
181 /* Free basic block info. */
/* Free the scan info for BB: delete its refs, then return the bb_info to
   the block pool.  NOTE(review): lossy extraction -- braces and a probable
   null-check line are missing; kept byte-identical.  */
184 df_scan_free_bb_info (struct dataflow
*dflow
, basic_block bb
, void *vbb_info
)
186 struct df_scan_bb_info
*bb_info
= (struct df_scan_bb_info
*) vbb_info
;
189 df_bb_refs_delete (dflow
, bb
->index
);
190 pool_free (dflow
->block_pool
, bb_info
);
/* (Re)allocate all scanning storage: the alloc pools, the def/use reg and
   ref tables (grown by 25% headroom over the insn count), per-insn info,
   per-block info for each block in BLOCKS_TO_RESCAN, and the
   hardware_regs_used / exit_block_uses bitmaps.  NOTE(review): lossy
   extraction -- braces and several lines are missing (e.g. the lvalue of
   the "df_scan_block pool" assignment); kept byte-identical.  */
195 /* Allocate the problem data for the scanning problem. This should be
196 called when the problem is created or when the entire function is to
200 df_scan_alloc (struct dataflow
*dflow
, bitmap blocks_to_rescan
)
202 struct df
*df
= dflow
->df
;
203 struct df_scan_problem_data
*problem_data
;
204 unsigned int insn_num
= get_max_uid () + 1;
205 unsigned int block_size
= 50;
206 unsigned int bb_index
;
209 /* Given the number of pools, this is really faster than tearing
211 if (dflow
->problem_data
)
212 df_scan_free_internal (dflow
);
215 = create_alloc_pool ("df_scan_block pool",
216 sizeof (struct df_scan_bb_info
),
219 problem_data
= xmalloc (sizeof (struct df_scan_problem_data
));
220 dflow
->problem_data
= problem_data
;
222 problem_data
->ref_pool
223 = create_alloc_pool ("df_scan_ref pool",
224 sizeof (struct df_ref
), block_size
);
225 problem_data
->insn_pool
226 = create_alloc_pool ("df_scan_insn pool",
227 sizeof (struct df_insn_info
), block_size
);
228 problem_data
->reg_pool
229 = create_alloc_pool ("df_scan_reg pool",
230 sizeof (struct df_reg_info
), block_size
);
232 insn_num
+= insn_num
/ 4;
233 df_grow_reg_info (dflow
, &df
->def_info
);
234 df_grow_ref_info (&df
->def_info
, insn_num
);
236 df_grow_reg_info (dflow
, &df
->use_info
);
237 df_grow_ref_info (&df
->use_info
, insn_num
*2);
239 df_grow_insn_info (df
);
240 df_grow_bb_info (dflow
);
242 EXECUTE_IF_SET_IN_BITMAP (blocks_to_rescan
, 0, bb_index
, bi
)
244 struct df_scan_bb_info
*bb_info
= df_scan_get_bb_info (dflow
, bb_index
);
247 bb_info
= (struct df_scan_bb_info
*) pool_alloc (dflow
->block_pool
);
248 df_scan_set_bb_info (dflow
, bb_index
, bb_info
);
250 bb_info
->artificial_defs
= NULL
;
251 bb_info
->artificial_uses
= NULL
;
254 df
->hardware_regs_used
= BITMAP_ALLOC (NULL
);
255 df
->exit_block_uses
= BITMAP_ALLOC (NULL
);
259 /* Free all of the data associated with the scan problem. */
/* Free all scan-problem storage, then the problem_data itself and the
   blocks_to_scan / blocks_to_analyze bitmaps if present.  NOTE(review):
   lossy extraction -- braces missing; kept byte-identical.  */
262 df_scan_free (struct dataflow
*dflow
)
264 struct df
*df
= dflow
->df
;
266 if (dflow
->problem_data
)
268 df_scan_free_internal (dflow
);
269 free (dflow
->problem_data
);
272 if (df
->blocks_to_scan
)
273 BITMAP_FREE (df
->blocks_to_scan
);
275 if (df
->blocks_to_analyze
)
276 BITMAP_FREE (df
->blocks_to_analyze
);
/* Debug dump: print the global hard-reg bitmaps, the per-function
   hardware_regs_used / exit_block_uses bitmaps, and the indices of
   regs_ever_live hard registers to FILE.  NOTE(review): lossy extraction --
   braces and the declaration of `i' are missing; kept byte-identical.  */
282 df_scan_dump (struct dataflow
*dflow ATTRIBUTE_UNUSED
, FILE *file ATTRIBUTE_UNUSED
)
284 struct df
*df
= dflow
->df
;
287 fprintf (file
, " all hard regs \t");
288 dump_bitmap (file
, df_all_hard_regs
);
289 fprintf (file
, " invalidated by call \t");
290 dump_bitmap (file
, df_invalidated_by_call
);
291 fprintf (file
, " hardware regs used \t");
292 dump_bitmap (file
, df
->hardware_regs_used
);
293 fprintf (file
, " exit block uses \t");
294 dump_bitmap (file
, df
->exit_block_uses
);
295 fprintf (file
, " regs ever live \t");
296 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
297 if (regs_ever_live
[i
])
298 fprintf (file
, "%d ", i
);
299 fprintf (file
, "\n");
/* The dataflow-problem descriptor for scanning: most solver hooks are NULL
   because scanning only collects refs and never iterates.  NOTE(review):
   lossy extraction -- braces missing; kept byte-identical.  */
302 static struct df_problem problem_SCAN
=
304 DF_SCAN
, /* Problem id. */
305 DF_NONE
, /* Direction. */
306 df_scan_alloc
, /* Allocate the problem specific data. */
307 NULL
, /* Reset global information. */
308 df_scan_free_bb_info
, /* Free basic block info. */
309 NULL
, /* Local compute function. */
310 NULL
, /* Init the solution specific data. */
311 NULL
, /* Iterative solver. */
312 NULL
, /* Confluence operator 0. */
313 NULL
, /* Confluence operator n. */
314 NULL
, /* Transfer function. */
315 NULL
, /* Finalize function. */
316 df_scan_free
, /* Free all of the problem information. */
317 df_scan_dump
, /* Debugging. */
318 NULL
/* Dependent problem. */
322 /* Create a new DATAFLOW instance and add it to an existing instance
323 of DF. The returned structure is what is used to get at the
/* Register the scanning problem with DF and return the new dataflow
   instance.  NOTE(review): lossy extraction -- the return-type line and
   braces are missing; kept byte-identical.  */
327 df_scan_add_problem (struct df
*df
)
329 return df_add_problem (df
, &problem_SCAN
);
332 /*----------------------------------------------------------------------------
333 Storage Allocation Utilities
334 ----------------------------------------------------------------------------*/
337 /* First, grow the reg_info information. If the current size is less than
338 the number of pseudos, grow to 25% more than the number of
341 Second, assure that all of the slots up to max_reg_num have been
342 filled with reg_info structures. */
/* Grow REF_INFO->regs to cover max_reg_num () registers (with 25% headroom)
   and pool-allocate a zeroed df_reg_info for every slot not yet initialized.
   NOTE(review): lossy extraction -- braces and the declaration of `i' are
   missing; kept byte-identical.  */
345 df_grow_reg_info (struct dataflow
*dflow
, struct df_ref_info
*ref_info
)
347 unsigned int max_reg
= max_reg_num ();
348 unsigned int new_size
= max_reg
;
349 struct df_scan_problem_data
*problem_data
=
350 (struct df_scan_problem_data
*) dflow
->problem_data
;
353 if (ref_info
->regs_size
< new_size
)
355 new_size
+= new_size
/ 4;
356 ref_info
->regs
= xrealloc (ref_info
->regs
,
357 new_size
*sizeof (struct df_reg_info
*));
358 ref_info
->regs_size
= new_size
;
361 for (i
= ref_info
->regs_inited
; i
< max_reg
; i
++)
363 struct df_reg_info
*reg_info
= pool_alloc (problem_data
->reg_pool
);
364 memset (reg_info
, 0, sizeof (struct df_reg_info
));
365 ref_info
->regs
[i
] = reg_info
;
368 ref_info
->regs_inited
= max_reg
;
372 /* Grow the ref information. */
/* Grow REF_INFO->refs to NEW_SIZE entries, zeroing the newly added tail.
   NOTE(review): lossy extraction -- braces missing; kept byte-identical.  */
375 df_grow_ref_info (struct df_ref_info
*ref_info
, unsigned int new_size
)
377 if (ref_info
->refs_size
< new_size
)
379 ref_info
->refs
= xrealloc (ref_info
->refs
,
380 new_size
*sizeof (struct df_ref
*));
381 memset (ref_info
->refs
+ ref_info
->refs_size
, 0,
382 (new_size
- ref_info
->refs_size
) *sizeof (struct df_ref
*));
383 ref_info
->refs_size
= new_size
;
388 /* Grow the ref information. If the current size is less than the
389 number of instructions, grow to 25% more than the number of
/* Grow DF->insns to cover all insn uids (with 25% headroom), zeroing the
   newly added tail.  NOTE(review): lossy extraction -- braces missing;
   kept byte-identical.  */
393 df_grow_insn_info (struct df
*df
)
395 unsigned int new_size
= get_max_uid () + 1;
396 if (df
->insns_size
< new_size
)
398 new_size
+= new_size
/ 4;
399 df
->insns
= xrealloc (df
->insns
,
400 new_size
*sizeof (struct df_insn_info
*));
401 memset (df
->insns
+ df
->insns_size
, 0,
402 (new_size
- df
->insns_size
) *sizeof (struct df_insn_info
*));
403 df
->insns_size
= new_size
;
410 /*----------------------------------------------------------------------------
411 PUBLIC INTERFACES FOR SMALL GRAIN CHANGES TO SCANNING.
412 ----------------------------------------------------------------------------*/
414 /* Rescan some BLOCKS or all the blocks defined by the last call to
415 df_set_blocks (if BLOCKS is NULL). */
/* Rescan BLOCKS (or, when BLOCKS is NULL, the full set): grow the tables,
   reset dependent problems over the affected blocks, delete the old refs,
   re-record them, and fold the result into df->blocks_to_scan.
   NOTE(review): lossy extraction -- braces, the declaration of `i', and
   the if/else structure separating the BLOCKS / !BLOCKS paths are missing;
   kept byte-identical.  */
418 df_rescan_blocks (struct df
*df
, bitmap blocks
)
420 bitmap local_blocks_to_scan
= BITMAP_ALLOC (NULL
);
422 struct dataflow
*dflow
= df
->problems_by_index
[DF_SCAN
];
425 df
->def_info
.refs_organized
= false;
426 df
->use_info
.refs_organized
= false;
432 /* Need to assure that there are space in all of the tables. */
433 unsigned int insn_num
= get_max_uid () + 1;
434 insn_num
+= insn_num
/ 4;
436 df_grow_reg_info (dflow
, &df
->def_info
);
437 df_grow_ref_info (&df
->def_info
, insn_num
);
439 df_grow_reg_info (dflow
, &df
->use_info
);
440 df_grow_ref_info (&df
->use_info
, insn_num
*2);
442 df_grow_insn_info (df
);
443 df_grow_bb_info (dflow
);
445 bitmap_copy (local_blocks_to_scan
, blocks
);
446 df
->def_info
.add_refs_inline
= true;
447 df
->use_info
.add_refs_inline
= true;
449 for (i
= df
->num_problems_defined
; i
; i
--)
451 bitmap blocks_to_reset
= NULL
;
452 if (*dflow
->problem
->reset_fun
)
454 if (!blocks_to_reset
)
456 blocks_to_reset
= BITMAP_ALLOC (NULL
);
457 bitmap_copy (blocks_to_reset
, local_blocks_to_scan
);
458 if (df
->blocks_to_scan
)
459 bitmap_ior_into (blocks_to_reset
, df
->blocks_to_scan
);
461 (*dflow
->problem
->reset_fun
) (dflow
, blocks_to_reset
);
464 BITMAP_FREE (blocks_to_reset
);
467 df_refs_delete (dflow
, local_blocks_to_scan
);
469 /* This may be a mistake, but if an explicit blocks is passed in
470 and the set of blocks to analyze has been explicitly set, add
471 the extra blocks to blocks_to_analyze. The alternative is to
472 put an assert here. We do not want this to just go by
473 silently or else we may get storage leaks. */
474 if (df
->blocks_to_analyze
)
475 bitmap_ior_into (df
->blocks_to_analyze
, blocks
);
479 /* If we are going to do everything, just reallocate everything.
480 Most stuff is allocated in pools so this is faster than
482 if (df
->blocks_to_analyze
)
483 bitmap_copy (local_blocks_to_scan
, df
->blocks_to_analyze
);
487 bitmap_set_bit (local_blocks_to_scan
, bb
->index
);
489 df_scan_alloc (dflow
, local_blocks_to_scan
);
491 df
->def_info
.add_refs_inline
= false;
492 df
->use_info
.add_refs_inline
= false;
495 df_refs_record (dflow
, local_blocks_to_scan
);
497 bitmap_print (stderr
, local_blocks_to_scan
, "scanning: ", "\n");
500 if (!df
->blocks_to_scan
)
501 df
->blocks_to_scan
= BITMAP_ALLOC (NULL
);
503 bitmap_ior_into (df
->blocks_to_scan
, local_blocks_to_scan
);
504 BITMAP_FREE (local_blocks_to_scan
);
507 /* Create a new ref of type DF_REF_TYPE for register REG at address
508 LOC within INSN of BB. */
/* Public entry point: create a ref of REF_TYPE for REG at *LOC in INSN,
   growing the reg/bb tables and lazily creating the bb_info first, and
   enabling inline ref addition for the matching (def or use) table.
   NOTE(review): lossy extraction -- the return-type line, the `bb'
   parameter line, and braces are missing; kept byte-identical.  */
511 df_ref_create (struct df
*df
, rtx reg
, rtx
*loc
, rtx insn
,
513 enum df_ref_type ref_type
,
514 enum df_ref_flags ref_flags
)
516 struct dataflow
*dflow
= df
->problems_by_index
[DF_SCAN
];
517 struct df_scan_bb_info
*bb_info
;
519 df_grow_reg_info (dflow
, &df
->use_info
);
520 df_grow_reg_info (dflow
, &df
->def_info
);
521 df_grow_bb_info (dflow
);
523 /* Make sure there is the bb_info for this block. */
524 bb_info
= df_scan_get_bb_info (dflow
, bb
->index
);
527 bb_info
= (struct df_scan_bb_info
*) pool_alloc (dflow
->block_pool
);
528 df_scan_set_bb_info (dflow
, bb
->index
, bb_info
);
529 bb_info
->artificial_defs
= NULL
;
530 bb_info
->artificial_uses
= NULL
;
533 if (ref_type
== DF_REF_REG_DEF
)
534 df
->def_info
.add_refs_inline
= true;
536 df
->use_info
.add_refs_inline
= true;
538 return df_ref_create_structure (dflow
, reg
, loc
, bb
, insn
, ref_type
, ref_flags
);
543 /*----------------------------------------------------------------------------
544 UTILITIES TO CREATE AND DESTROY REFS AND CHAINS.
545 ----------------------------------------------------------------------------*/
548 /* Get the artificial defs for a basic block. */
/* Return the artificial defs chain of the block BB_INDEX.  NOTE(review):
   lossy extraction -- return type and braces missing; kept byte-identical.  */
551 df_get_artificial_defs (struct df
*df
, unsigned int bb_index
)
553 struct dataflow
*dflow
= df
->problems_by_index
[DF_SCAN
];
554 return df_scan_get_bb_info (dflow
, bb_index
)->artificial_defs
;
558 /* Get the artificial uses for a basic block. */
/* Return the artificial uses chain of the block BB_INDEX.  NOTE(review):
   lossy extraction -- return type and braces missing; kept byte-identical.  */
561 df_get_artificial_uses (struct df
*df
, unsigned int bb_index
)
563 struct dataflow
*dflow
= df
->problems_by_index
[DF_SCAN
];
564 return df_scan_get_bb_info (dflow
, bb_index
)->artificial_uses
;
568 /* Link REF at the front of reg_use or reg_def chain for REGNO. */
/* Push REF onto the front of REG_INFO's doubly-linked reg chain, fixing up
   the old head's prev pointer when the chain was non-empty.  NOTE(review):
   lossy extraction -- the `ref' parameter line, braces, and the
   `if (head)' guard line are missing; kept byte-identical.  */
571 df_reg_chain_create (struct df_reg_info
*reg_info
,
574 struct df_ref
*head
= reg_info
->reg_chain
;
575 reg_info
->reg_chain
= ref
;
577 DF_REF_NEXT_REG (ref
) = head
;
579 /* We cannot actually link to the head of the chain. */
580 DF_REF_PREV_REG (ref
) = NULL
;
583 DF_REF_PREV_REG (head
) = ref
;
587 /* Remove REF from the CHAIN. Return the head of the chain. This
588 will be CHAIN unless the REF was at the beginning of the chain. */
/* Remove REF from the singly-linked CHAIN (next_ref links), returning the
   possibly-new head.  NOTE(review): lossy extraction -- braces, the walk
   loop header, the `return' statements, and the gcc_unreachable tail are
   missing; kept byte-identical.  */
590 static struct df_ref
*
591 df_ref_unlink (struct df_ref
*chain
, struct df_ref
*ref
)
593 struct df_ref
*orig_chain
= chain
;
594 struct df_ref
*prev
= NULL
;
601 prev
->next_ref
= ref
->next_ref
;
602 ref
->next_ref
= NULL
;
607 chain
= ref
->next_ref
;
608 ref
->next_ref
= NULL
;
614 chain
= chain
->next_ref
;
617 /* Someone passed in a ref that was not in the chain. */
623 /* Unlink and delete REF at the reg_use or reg_def chain. Also delete
624 the def-use or use-def chain if it exists. Returns the next ref in
625 uses or defs chain. */
/* Unlink REF from its reg chain and the def/use ref table, delete any
   def-use/use-def chain rooted at it, free it to the ref pool, and
   (per next_ref captured up front) presumably return the next ref --
   the return statement is not visible here, TODO confirm.  NOTE(review):
   lossy extraction -- braces and several control lines are missing; kept
   byte-identical.  */
628 df_reg_chain_unlink (struct dataflow
*dflow
, struct df_ref
*ref
)
630 struct df
*df
= dflow
->df
;
631 struct df_ref
*next
= DF_REF_NEXT_REG (ref
);
632 struct df_ref
*prev
= DF_REF_PREV_REG (ref
);
633 struct df_scan_problem_data
*problem_data
=
634 (struct df_scan_problem_data
*) dflow
->problem_data
;
635 struct df_reg_info
*reg_info
;
636 struct df_ref
*next_ref
= ref
->next_ref
;
637 unsigned int id
= DF_REF_ID (ref
);
639 if (DF_REF_TYPE (ref
) == DF_REF_REG_DEF
)
641 reg_info
= DF_REG_DEF_GET (df
, DF_REF_REGNO (ref
));
642 df
->def_info
.bitmap_size
--;
643 if (df
->def_info
.refs
&& (id
< df
->def_info
.refs_size
))
644 DF_DEFS_SET (df
, id
, NULL
);
648 reg_info
= DF_REG_USE_GET (df
, DF_REF_REGNO (ref
));
649 df
->use_info
.bitmap_size
--;
650 if (df
->use_info
.refs
&& (id
< df
->use_info
.refs_size
))
651 DF_USES_SET (df
, id
, NULL
);
654 /* Delete any def-use or use-def chains that start here. */
655 if (DF_REF_CHAIN (ref
))
656 df_chain_unlink (df
->problems_by_index
[DF_CHAIN
], ref
, NULL
);
660 /* Unlink from the reg chain. If there is no prev, this is the
661 first of the list. If not, just join the next and prev. */
664 DF_REF_NEXT_REG (prev
) = next
;
666 DF_REF_PREV_REG (next
) = prev
;
670 reg_info
->reg_chain
= next
;
672 DF_REF_PREV_REG (next
) = NULL
;
675 pool_free (problem_data
->ref_pool
, ref
);
680 /* Unlink REF from all def-use/use-def chains, etc. */
/* Fully remove REF: unlink it from the block's artificial def/use chain or
   from its insn's def/use chain (depending on DF_REF_ARTIFICIAL), clear
   its slot in the big def/use table when inline additions are active, and
   finally unlink it from its reg chain.  NOTE(review): lossy extraction --
   braces and the def/use else split are missing; kept byte-identical.  */
683 df_ref_remove (struct df
*df
, struct df_ref
*ref
)
685 struct dataflow
*dflow
= df
->problems_by_index
[DF_SCAN
];
686 if (DF_REF_REG_DEF_P (ref
))
688 if (DF_REF_FLAGS (ref
) & DF_REF_ARTIFICIAL
)
690 struct df_scan_bb_info
*bb_info
691 = df_scan_get_bb_info (dflow
, DF_REF_BB (ref
)->index
);
692 bb_info
->artificial_defs
693 = df_ref_unlink (bb_info
->artificial_defs
, ref
);
696 DF_INSN_UID_DEFS (df
, DF_REF_INSN_UID (ref
)) =
697 df_ref_unlink (DF_INSN_UID_DEFS (df
, DF_REF_INSN_UID (ref
)), ref
);
699 if (df
->def_info
.add_refs_inline
)
700 DF_DEFS_SET (df
, DF_REF_ID (ref
), NULL
);
704 if (DF_REF_FLAGS (ref
) & DF_REF_ARTIFICIAL
)
706 struct df_scan_bb_info
*bb_info
707 = df_scan_get_bb_info (dflow
, DF_REF_BB (ref
)->index
);
708 bb_info
->artificial_uses
709 = df_ref_unlink (bb_info
->artificial_uses
, ref
);
712 DF_INSN_UID_USES (df
, DF_REF_INSN_UID (ref
)) =
713 df_ref_unlink (DF_INSN_UID_USES (df
, DF_REF_INSN_UID (ref
)), ref
);
715 if (df
->use_info
.add_refs_inline
)
716 DF_USES_SET (df
, DF_REF_ID (ref
), NULL
);
719 df_reg_chain_unlink (dflow
, ref
);
723 /* Create the insn record for INSN. If there was one there, zero it out. */
/* Get or lazily pool-allocate the insn record for INSN, zero it, and
   presumably return it (the return statement is not visible -- TODO
   confirm).  NOTE(review): lossy extraction -- braces and the null-check
   guard before pool_alloc are missing; kept byte-identical.  */
725 static struct df_insn_info
*
726 df_insn_create_insn_record (struct dataflow
*dflow
, rtx insn
)
728 struct df
*df
= dflow
->df
;
729 struct df_scan_problem_data
*problem_data
=
730 (struct df_scan_problem_data
*) dflow
->problem_data
;
732 struct df_insn_info
*insn_rec
= DF_INSN_GET (df
, insn
);
735 insn_rec
= pool_alloc (problem_data
->insn_pool
);
736 DF_INSN_SET (df
, insn
, insn_rec
);
738 memset (insn_rec
, 0, sizeof (struct df_insn_info
));
744 /* Delete all of the refs information from INSN. */
/* Delete all def and use refs recorded for INSN, free its insn record to
   the insn pool, and clear its slot in df->insns.  NOTE(review): lossy
   extraction -- braces, the declaration of `ref', and the while-loop
   headers around df_reg_chain_unlink are missing; kept byte-identical.  */
747 df_insn_refs_delete (struct dataflow
*dflow
, rtx insn
)
749 struct df
*df
= dflow
->df
;
750 unsigned int uid
= INSN_UID (insn
);
751 struct df_insn_info
*insn_info
= NULL
;
753 struct df_scan_problem_data
*problem_data
=
754 (struct df_scan_problem_data
*) dflow
->problem_data
;
756 if (uid
< df
->insns_size
)
757 insn_info
= DF_INSN_UID_GET (df
, uid
);
761 ref
= insn_info
->defs
;
763 ref
= df_reg_chain_unlink (dflow
, ref
);
765 ref
= insn_info
->uses
;
767 ref
= df_reg_chain_unlink (dflow
, ref
);
769 pool_free (problem_data
->insn_pool
, insn_info
);
770 DF_INSN_SET (df
, insn
, NULL
);
775 /* Delete all of the refs information from basic_block with BB_INDEX. */
/* Delete all ref information for block BB_INDEX: per-insn refs for every
   insn in the block, then the block's artificial def and use chains.
   NOTE(review): lossy extraction -- braces, declarations of insn/def/use,
   and the while-loop headers are missing; kept byte-identical.  */
778 df_bb_refs_delete (struct dataflow
*dflow
, int bb_index
)
783 struct df_scan_bb_info
*bb_info
784 = df_scan_get_bb_info (dflow
, bb_index
);
786 basic_block bb
= BASIC_BLOCK (bb_index
);
787 FOR_BB_INSNS (bb
, insn
)
791 /* Record defs within INSN. */
792 df_insn_refs_delete (dflow
, insn
);
796 /* Get rid of any artificial uses or defs. */
799 def
= bb_info
->artificial_defs
;
801 def
= df_reg_chain_unlink (dflow
, def
);
802 bb_info
->artificial_defs
= NULL
;
803 use
= bb_info
->artificial_uses
;
805 use
= df_reg_chain_unlink (dflow
, use
);
806 bb_info
->artificial_uses
= NULL
;
811 /* Delete all of the refs information from BLOCKS. */
/* Delete the refs of every block whose bit is set in BLOCKS.  NOTE(review):
   lossy extraction -- braces and the bitmap_iterator declaration are
   missing; kept byte-identical.  */
814 df_refs_delete (struct dataflow
*dflow
, bitmap blocks
)
817 unsigned int bb_index
;
819 EXECUTE_IF_SET_IN_BITMAP (blocks
, 0, bb_index
, bi
)
821 df_bb_refs_delete (dflow
, bb_index
);
826 /* Take build ref table for either the uses or defs from the reg-use
827 or reg-def chains. */
/* Rebuild the flat refs[] table from the per-register chains, assigning
   consecutive DF_REF_IDs per register and recording each register's begin
   offset and ref count; finally reset bitmap_size to the squeezed size.
   NOTE(review): lossy extraction -- braces, the declarations of regno and
   count, the inner walk-loop header, and the `size' accumulation lines are
   missing; kept byte-identical.  */
830 df_reorganize_refs (struct df_ref_info
*ref_info
)
832 unsigned int m
= ref_info
->regs_inited
;
834 unsigned int offset
= 0;
835 unsigned int size
= 0;
837 if (ref_info
->refs_organized
)
840 if (ref_info
->refs_size
< ref_info
->bitmap_size
)
842 int new_size
= ref_info
->bitmap_size
+ ref_info
->bitmap_size
/ 4;
843 df_grow_ref_info (ref_info
, new_size
);
846 for (regno
= 0; regno
< m
; regno
++)
848 struct df_reg_info
*reg_info
= ref_info
->regs
[regno
];
852 struct df_ref
*ref
= reg_info
->reg_chain
;
853 reg_info
->begin
= offset
;
856 ref_info
->refs
[offset
] = ref
;
857 DF_REF_ID (ref
) = offset
++;
858 ref
= DF_REF_NEXT_REG (ref
);
862 reg_info
->n_refs
= count
;
866 /* The bitmap size is not decremented when refs are deleted. So
867 reset it now that we have squished out all of the empty
869 ref_info
->bitmap_size
= size
;
870 ref_info
->refs_organized
= true;
871 ref_info
->add_refs_inline
= true;
875 /* Local miscellaneous routines. */
877 /* Local routines for recording refs. */
879 /* Set where we are in the compilation. */
/* Set where we are in the compilation.  NOTE(review): lossy extraction --
   the return-type line and the entire body are missing from this chunk;
   kept byte-identical.  */
882 df_set_state (int state
)
889 /*----------------------------------------------------------------------------
890 Hard core instruction scanning code. No external interfaces here,
891 just a lot of routines that look inside insns.
892 ----------------------------------------------------------------------------*/
894 /* Create a ref and add it to the reg-def or reg-use chains. */
/* Allocate and initialize a df_ref from the ref pool, link it into the
   reg_def or reg_use chain for its regno, optionally record it inline in
   the big def/use table (growing it on demand), and thread it onto either
   the block's artificial chain or the insn's def/use chain; presumably
   returns this_ref -- the return statement is not visible, TODO confirm.
   NOTE(review): lossy extraction -- braces and the def/use else split are
   missing; kept byte-identical.  */
896 static struct df_ref
*
897 df_ref_create_structure (struct dataflow
*dflow
, rtx reg
, rtx
*loc
,
898 basic_block bb
, rtx insn
,
899 enum df_ref_type ref_type
,
900 enum df_ref_flags ref_flags
)
902 struct df_ref
*this_ref
;
903 struct df
*df
= dflow
->df
;
904 int regno
= REGNO (GET_CODE (reg
) == SUBREG
? SUBREG_REG (reg
) : reg
);
905 struct df_scan_problem_data
*problem_data
=
906 (struct df_scan_problem_data
*) dflow
->problem_data
;
908 this_ref
= pool_alloc (problem_data
->ref_pool
);
909 DF_REF_REG (this_ref
) = reg
;
910 DF_REF_REGNO (this_ref
) = regno
;
911 DF_REF_LOC (this_ref
) = loc
;
912 DF_REF_INSN (this_ref
) = insn
;
913 DF_REF_CHAIN (this_ref
) = NULL
;
914 DF_REF_TYPE (this_ref
) = ref_type
;
915 DF_REF_FLAGS (this_ref
) = ref_flags
;
916 DF_REF_DATA (this_ref
) = NULL
;
917 DF_REF_BB (this_ref
) = bb
;
919 /* Link the ref into the reg_def and reg_use chains and keep a count
921 if (ref_type
== DF_REF_REG_DEF
)
923 struct df_reg_info
*reg_info
= DF_REG_DEF_GET (df
, regno
);
926 /* Add the ref to the reg_def chain. */
927 df_reg_chain_create (reg_info
, this_ref
);
928 DF_REF_ID (this_ref
) = df
->def_info
.bitmap_size
;
929 if (df
->def_info
.add_refs_inline
)
931 if (DF_DEFS_SIZE (df
) >= df
->def_info
.refs_size
)
933 int new_size
= df
->def_info
.bitmap_size
934 + df
->def_info
.bitmap_size
/ 4;
935 df_grow_ref_info (&df
->def_info
, new_size
);
937 /* Add the ref to the big array of defs. */
938 DF_DEFS_SET (df
, df
->def_info
.bitmap_size
, this_ref
);
939 df
->def_info
.refs_organized
= false;
942 df
->def_info
.bitmap_size
++;
944 if (DF_REF_FLAGS (this_ref
) & DF_REF_ARTIFICIAL
)
946 struct df_scan_bb_info
*bb_info
947 = df_scan_get_bb_info (dflow
, bb
->index
);
948 this_ref
->next_ref
= bb_info
->artificial_defs
;
949 bb_info
->artificial_defs
= this_ref
;
953 this_ref
->next_ref
= DF_INSN_GET (df
, insn
)->defs
;
954 DF_INSN_GET (df
, insn
)->defs
= this_ref
;
959 struct df_reg_info
*reg_info
= DF_REG_USE_GET (df
, regno
);
962 /* Add the ref to the reg_use chain. */
963 df_reg_chain_create (reg_info
, this_ref
);
964 DF_REF_ID (this_ref
) = df
->use_info
.bitmap_size
;
965 if (df
->use_info
.add_refs_inline
)
967 if (DF_USES_SIZE (df
) >= df
->use_info
.refs_size
)
969 int new_size
= df
->use_info
.bitmap_size
970 + df
->use_info
.bitmap_size
/ 4;
971 df_grow_ref_info (&df
->use_info
, new_size
);
973 /* Add the ref to the big array of uses. */
974 DF_USES_SET (df
, df
->use_info
.bitmap_size
, this_ref
);
975 df
->use_info
.refs_organized
= false;
978 df
->use_info
.bitmap_size
++;
979 if (DF_REF_FLAGS (this_ref
) & DF_REF_ARTIFICIAL
)
981 struct df_scan_bb_info
*bb_info
982 = df_scan_get_bb_info (dflow
, bb
->index
);
983 this_ref
->next_ref
= bb_info
->artificial_uses
;
984 bb_info
->artificial_uses
= this_ref
;
988 this_ref
->next_ref
= DF_INSN_GET (df
, insn
)->uses
;
989 DF_INSN_GET (df
, insn
)->uses
= this_ref
;
996 /* Create new references of type DF_REF_TYPE for each part of register REG
997 at address LOC within INSN of BB. */
/* Create refs for each part of REG (a REG or SUBREG) at *LOC in INSN:
   strip uninteresting SUBREGs unless DF_SUBREGS is set, and for hard
   registers create one ref per covered hard reg (updating regs_ever_live
   as a side effect); otherwise create a single ref for the pseudo.
   NOTE(review): lossy extraction -- the `record_live' parameter line,
   braces, and the declarations of regno/endregno/i are missing; kept
   byte-identical.  */
1000 df_ref_record (struct dataflow
*dflow
, rtx reg
, rtx
*loc
,
1001 basic_block bb
, rtx insn
,
1002 enum df_ref_type ref_type
,
1003 enum df_ref_flags ref_flags
,
1007 struct df
*df
= dflow
->df
;
1009 gcc_assert (REG_P (reg
) || GET_CODE (reg
) == SUBREG
);
1011 /* For the reg allocator we are interested in some SUBREG rtx's, but not
1012 all. Notably only those representing a word extraction from a multi-word
1013 reg. As written in the documentation those should have the form
1014 (subreg:SI (reg:M A) N), with size(SImode) > size(Mmode).
1015 XXX Is that true? We could also use the global word_mode variable. */
1016 if ((df
->flags
& DF_SUBREGS
) == 0
1017 && GET_CODE (reg
) == SUBREG
1018 && (GET_MODE_SIZE (GET_MODE (reg
)) < GET_MODE_SIZE (word_mode
)
1019 || GET_MODE_SIZE (GET_MODE (reg
))
1020 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (reg
)))))
1022 loc
= &SUBREG_REG (reg
);
1024 ref_flags
|= DF_REF_STRIPPED
;
1027 regno
= REGNO (GET_CODE (reg
) == SUBREG
? SUBREG_REG (reg
) : reg
);
1028 if (regno
< FIRST_PSEUDO_REGISTER
)
1033 if (! (df
->flags
& DF_HARD_REGS
))
1036 /* GET_MODE (reg) is correct here. We do not want to go into a SUBREG
1037 for the mode, because we only want to add references to regs, which
1038 are really referenced. E.g., a (subreg:SI (reg:DI 0) 0) does _not_
1039 reference the whole reg 0 in DI mode (which would also include
1040 reg 1, at least, if 0 and 1 are SImode registers). */
1041 endregno
= hard_regno_nregs
[regno
][GET_MODE (reg
)];
1042 if (GET_CODE (reg
) == SUBREG
)
1043 regno
+= subreg_regno_offset (regno
, GET_MODE (SUBREG_REG (reg
)),
1044 SUBREG_BYTE (reg
), GET_MODE (reg
));
1047 for (i
= regno
; i
< endregno
; i
++)
1049 /* Calls are handled at call site because regs_ever_live
1050 doesn't include clobbered regs, only used ones. */
1051 if (ref_type
== DF_REF_REG_DEF
&& record_live
)
1052 regs_ever_live
[i
] = 1;
1053 else if ((ref_type
== DF_REF_REG_USE
1054 || ref_type
== DF_REF_REG_MEM_STORE
1055 || ref_type
== DF_REF_REG_MEM_LOAD
)
1056 && ((ref_flags
& DF_REF_ARTIFICIAL
) == 0))
1058 /* Set regs_ever_live on uses of non-eliminable frame
1059 pointers and arg pointers. */
1060 if (! (TEST_HARD_REG_BIT (elim_reg_set
, regno
)
1061 && (regno
== FRAME_POINTER_REGNUM
1062 || regno
== ARG_POINTER_REGNUM
)))
1063 regs_ever_live
[i
] = 1;
1066 df_ref_create_structure (dflow
, regno_reg_rtx
[i
], loc
,
1067 bb
, insn
, ref_type
, ref_flags
);
1072 df_ref_create_structure (dflow
, reg
, loc
,
1073 bb
, insn
, ref_type
, ref_flags
);
1078 /* A set to a non-paradoxical SUBREG for which the number of word_mode units
1079 covered by the outer mode is smaller than that covered by the inner mode,
1080 is a read-modify-write operation.
1081 This function returns true iff the SUBREG X is such a SUBREG. */
/* Return whether X is a read-modify-write SUBREG: outer size strictly
   smaller than inner size and inner size larger than a word.  NOTE(review):
   lossy extraction -- the return-type line, braces, and the early
   `return false;' are missing; kept byte-identical.  */
1084 df_read_modify_subreg_p (rtx x
)
1086 unsigned int isize
, osize
;
1087 if (GET_CODE (x
) != SUBREG
)
1089 isize
= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)));
1090 osize
= GET_MODE_SIZE (GET_MODE (x
));
1091 return (isize
> osize
&& isize
> UNITS_PER_WORD
);
1095 /* Process all the registers defined in the rtx, X.
1096 Autoincrement/decrement definitions will be picked up by
/* Record the registers defined by X (a SET/CLOBBER/EXPR_LIST): recurse into
   BLKmode PARALLEL return-value destinations, peel STRICT_LOW_PART /
   ZERO_EXTRACT / read-modify SUBREG wrappers (marking DF_REF_READ_WRITE),
   and finally record a DF_REF_REG_DEF for a REG or SUBREG-of-REG
   destination.  NOTE(review): lossy extraction -- braces, the declarations
   of loc/dst/i, and part of the final if-condition are missing; kept
   byte-identical.  */
1100 df_def_record_1 (struct dataflow
*dflow
, rtx x
,
1101 basic_block bb
, rtx insn
,
1102 enum df_ref_flags flags
, bool record_live
)
1107 /* We may recursively call ourselves on EXPR_LIST when dealing with PARALLEL
1109 if (GET_CODE (x
) == EXPR_LIST
|| GET_CODE (x
) == CLOBBER
)
1112 loc
= &SET_DEST (x
);
1115 /* Some targets place small structures in registers for
1116 return values of functions. */
1117 if (GET_CODE (dst
) == PARALLEL
&& GET_MODE (dst
) == BLKmode
)
1121 for (i
= XVECLEN (dst
, 0) - 1; i
>= 0; i
--)
1123 rtx temp
= XVECEXP (dst
, 0, i
);
1124 if (GET_CODE (temp
) == EXPR_LIST
|| GET_CODE (temp
) == CLOBBER
1125 || GET_CODE (temp
) == SET
)
1126 df_def_record_1 (dflow
, temp
, bb
, insn
,
1127 GET_CODE (temp
) == CLOBBER
? flags
| DF_REF_CLOBBER
: flags
,
1133 /* Maybe, we should flag the use of STRICT_LOW_PART somehow. It might
1134 be handy for the reg allocator. */
1135 while (GET_CODE (dst
) == STRICT_LOW_PART
1136 || GET_CODE (dst
) == ZERO_EXTRACT
1137 || df_read_modify_subreg_p (dst
))
1140 /* Strict low part always contains SUBREG, but we do not want to make
1141 it appear outside, as whole register is always considered. */
1142 if (GET_CODE (dst
) == STRICT_LOW_PART
)
1144 loc
= &XEXP (dst
, 0);
1148 loc
= &XEXP (dst
, 0);
1150 flags
|= DF_REF_READ_WRITE
;
1154 || (GET_CODE (dst
) == SUBREG
&& REG_P (SUBREG_REG (dst
))))
1155 df_ref_record (dflow
, dst
, loc
, bb
, insn
,
1156 DF_REF_REG_DEF
, flags
, record_live
);
1160 /* Process all the registers defined in the pattern rtx, X. */
/* Record all registers defined in pattern X: a single SET/CLOBBER is
   handed to df_def_record_1 (with DF_REF_CLOBBER for clobbers), COND_EXEC
   recurses into its body, and PARALLEL recurses into each element.
   NOTE(review): lossy extraction -- braces and the declaration of `i' are
   missing; kept byte-identical.  */
1163 df_defs_record (struct dataflow
*dflow
, rtx x
, basic_block bb
, rtx insn
)
1165 RTX_CODE code
= GET_CODE (x
);
1167 if (code
== SET
|| code
== CLOBBER
)
1169 /* Mark the single def within the pattern. */
1170 df_def_record_1 (dflow
, x
, bb
, insn
,
1171 code
== CLOBBER
? DF_REF_CLOBBER
: 0, true);
1173 else if (code
== COND_EXEC
)
1175 df_defs_record (dflow
, COND_EXEC_CODE (x
), bb
, insn
);
1177 else if (code
== PARALLEL
)
1181 /* Mark the multiple defs within the pattern. */
1182 for (i
= XVECLEN (x
, 0) - 1; i
>= 0; i
--)
1183 df_defs_record (dflow
, XVECEXP (x
, 0, i
), bb
, insn
);
1188 /* Process all the registers used in the rtx at address LOC. */
/* DFLOW is the dataflow instance; *LOC is the rtx being scanned;
   REF_TYPE classifies the use (plain register use, memory-load or
   memory-store address, ...); BB/INSN locate it; FLAGS carries
   DF_REF_* modifiers such as DF_REF_IN_NOTE.  Walks the rtx
   recursively, recording a use ref for every register reached.
   NOTE(review): this extraction is garbled -- the switch's case
   labels (CLOBBER, MEM, SUBREG, REG, SET, etc.) and many braces are
   missing from the visible text; code left byte-identical.  */
1191 df_uses_record (struct dataflow
*dflow
, rtx
*loc
, enum df_ref_type ref_type
,
1192 basic_block bb
, rtx insn
, enum df_ref_flags flags
)
1200 code
= GET_CODE (x
);
1216 /* If we are clobbering a MEM, mark any registers inside the address
1218 if (MEM_P (XEXP (x
, 0)))
1219 df_uses_record (dflow
, &XEXP (XEXP (x
, 0), 0),
1220 DF_REF_REG_MEM_STORE
, bb
, insn
, flags
);
1222 /* If we're clobbering a REG then we have a def so ignore. */
/* MEM operand: the address expression is a use feeding a load.  */
1226 df_uses_record (dflow
, &XEXP (x
, 0), DF_REF_REG_MEM_LOAD
, bb
, insn
,
1227 flags
& DF_REF_IN_NOTE
);
1231 /* While we're here, optimize this case. */
1233 /* In case the SUBREG is not of a REG, do not optimize. */
1234 if (!REG_P (SUBREG_REG (x
)))
1236 loc
= &SUBREG_REG (x
);
1237 df_uses_record (dflow
, loc
, ref_type
, bb
, insn
, flags
);
1240 /* ... Fall through ... */
/* Plain register (or SUBREG of one): record the use itself.  */
1243 df_ref_record (dflow
, x
, loc
, bb
, insn
, ref_type
, flags
, true);
/* SET: the source is always a use; the destination needs a closer
   look since parts of it (addresses, extract operands) are read.  */
1248 rtx dst
= SET_DEST (x
);
1249 gcc_assert (!(flags
& DF_REF_IN_NOTE
));
1250 df_uses_record (dflow
, &SET_SRC (x
), DF_REF_REG_USE
, bb
, insn
, 0);
1252 switch (GET_CODE (dst
))
/* Read-modify-write SUBREG destination: the inner reg is also read.  */
1255 if (df_read_modify_subreg_p (dst
))
1257 df_uses_record (dflow
, &SUBREG_REG (dst
),
1259 insn
, DF_REF_READ_WRITE
);
/* MEM destination: the address is a use (store-address class).  */
1270 df_uses_record (dflow
, &XEXP (dst
, 0),
1271 DF_REF_REG_MEM_STORE
,
1274 case STRICT_LOW_PART
:
1276 rtx
*temp
= &XEXP (dst
, 0);
1277 /* A strict_low_part uses the whole REG and not just the
1279 dst
= XEXP (dst
, 0);
1280 df_uses_record (dflow
,
1281 (GET_CODE (dst
) == SUBREG
)
1282 ? &SUBREG_REG (dst
) : temp
,
1284 insn
, DF_REF_READ_WRITE
);
/* presumably the ZERO_EXTRACT/SIGN_EXTRACT destination case -- the
   label is lost in this extraction; operands 0..2 (reg, size, pos)
   are all read before the store into operand 0.  TODO confirm.  */
1289 df_uses_record (dflow
, &XEXP (dst
, 0),
1290 DF_REF_REG_USE
, bb
, insn
,
1292 df_uses_record (dflow
, &XEXP (dst
, 1),
1293 DF_REF_REG_USE
, bb
, insn
, 0);
1294 df_uses_record (dflow
, &XEXP (dst
, 2),
1295 DF_REF_REG_USE
, bb
, insn
, 0);
1296 dst
= XEXP (dst
, 0);
1308 case UNSPEC_VOLATILE
:
1312 /* Traditional and volatile asm instructions must be
1313 considered to use and clobber all hard registers, all
1314 pseudo-registers and all of memory. So must TRAP_IF and
1315 UNSPEC_VOLATILE operations.
1317 Consider for instance a volatile asm that changes the fpu
1318 rounding mode. An insn should not be moved across this
1319 even if it only uses pseudo-regs because it might give an
1320 incorrectly rounded result.
1322 However, flow.c's liveness computation did *not* do this,
1323 giving the reasoning as " ?!? Unfortunately, marking all
1324 hard registers as live causes massive problems for the
1325 register allocator and marking all pseudos as live creates
1326 mountains of uninitialized variable warnings."
1328 In order to maintain the status quo with regard to liveness
1329 and uses, we do what flow.c did and just mark any regs we
1330 can find in ASM_OPERANDS as used. Later on, when liveness
1331 is computed, asm insns are scanned and regs_asm_clobbered
1334 For all ASM_OPERANDS, we must traverse the vector of input
1335 operands. We can not just fall through here since then we
1336 would be confused by the ASM_INPUT rtx inside ASM_OPERANDS,
1337 which do not indicate traditional asms unlike their normal
1339 if (code
== ASM_OPERANDS
)
1343 for (j
= 0; j
< ASM_OPERANDS_INPUT_LENGTH (x
); j
++)
1344 df_uses_record (dflow
, &ASM_OPERANDS_INPUT (x
, j
),
1345 DF_REF_REG_USE
, bb
, insn
, 0);
/* presumably the PRE_INC/POST_INC-style auto-modify case -- label
   lost in extraction; operand 0 is both defined and used.  */
1357 /* Catch the def of the register being modified. */
1358 df_ref_record (dflow
, XEXP (x
, 0), &XEXP (x
, 0), bb
, insn
,
1359 DF_REF_REG_DEF
, DF_REF_READ_WRITE
, true);
1361 /* ... Fall through to handle uses ... */
1367 /* Recursively scan the operands of this expression. */
1369 const char *fmt
= GET_RTX_FORMAT (code
);
1372 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1376 /* Tail recursive case: save a function call level. */
1382 df_uses_record (dflow
, &XEXP (x
, i
), ref_type
, bb
, insn
, flags
);
1384 else if (fmt
[i
] == 'E')
/* 'E' format: a vector of rtx operands -- scan each element.  */
1387 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
1388 df_uses_record (dflow
, &XVECEXP (x
, i
, j
), ref_type
,
1395 /* Return true if *LOC contains an asm. */
/* for_each_rtx callback; DATA is unused.  Tests whether the
   subexpression at *LOC is an ASM_OPERANDS.  NOTE(review): the
   return statements are missing from this extraction -- presumably
   returns nonzero on match to stop the walk; verify against the
   original file.  */
1398 df_insn_contains_asm_1 (rtx
*loc
, void *data ATTRIBUTE_UNUSED
)
1402 if (GET_CODE (*loc
) == ASM_OPERANDS
)
1408 /* Return true if INSN contains an ASM. */
/* Thin wrapper: walks every subexpression of INSN with for_each_rtx
   using df_insn_contains_asm_1 as the predicate.  */
1411 df_insn_contains_asm (rtx insn
)
1413 return for_each_rtx (&insn
, df_insn_contains_asm_1
, NULL
);
1418 /* Record all the refs for DF within INSN of basic block BB. */
/* Records defs from the pattern, uses from REG_EQUIV/REG_EQUAL notes
   (when DF_EQUIV_NOTES is set), the argument/clobber registers of a
   CALL_INSN, and finally all uses in the pattern itself.
   NOTE(review): extraction is garbled -- case labels in the
   REG_NOTE_KIND switch and the CALL_P guard around the call-handling
   section are missing from the visible text; code left byte-identical.  */
1421 df_insn_refs_record (struct dataflow
*dflow
, basic_block bb
, rtx insn
)
1424 struct df
*df
= dflow
->df
;
/* Remember insns containing asms; liveness treats them specially.  */
1430 if (df_insn_contains_asm (insn
))
1431 DF_INSN_CONTAINS_ASM (df
, insn
) = true;
1433 /* Record register defs. */
1434 df_defs_record (dflow
, PATTERN (insn
), bb
, insn
);
/* Optionally scan register notes; uses found there are tagged with
   DF_REF_IN_NOTE so clients can distinguish them.  */
1436 if (df
->flags
& DF_EQUIV_NOTES
)
1437 for (note
= REG_NOTES (insn
); note
;
1438 note
= XEXP (note
, 1))
1440 switch (REG_NOTE_KIND (note
))
1444 df_uses_record (dflow
, &XEXP (note
, 0), DF_REF_REG_USE
,
1445 bb
, insn
, DF_REF_IN_NOTE
);
1455 /* Record the registers used to pass arguments, and explicitly
1456 noted as clobbered. */
1457 for (note
= CALL_INSN_FUNCTION_USAGE (insn
); note
;
1458 note
= XEXP (note
, 1))
1460 if (GET_CODE (XEXP (note
, 0)) == USE
)
1461 df_uses_record (dflow
, &XEXP (XEXP (note
, 0), 0),
1464 else if (GET_CODE (XEXP (note
, 0)) == CLOBBER
)
1466 df_defs_record (dflow
, XEXP (note
, 0), bb
, insn
);
/* A clobbered hard reg (and all of its component regs) becomes
   regs_ever_live.  */
1467 if (REG_P (XEXP (XEXP (note
, 0), 0)))
1469 rtx reg
= XEXP (XEXP (note
, 0), 0);
1474 regno_last
= regno_first
= REGNO (reg
);
1475 if (regno_first
< FIRST_PSEUDO_REGISTER
)
1477 += hard_regno_nregs
[regno_first
][GET_MODE (reg
)] - 1;
1478 for (i
= regno_first
; i
<= regno_last
; i
++)
1479 regs_ever_live
[i
] = 1;
1484 /* The stack ptr is used (honorarily) by a CALL insn. */
1485 df_uses_record (dflow
, &regno_reg_rtx
[STACK_POINTER_REGNUM
],
1486 DF_REF_REG_USE
, bb
, insn
,
1489 if (df
->flags
& DF_HARD_REGS
)
1493 /* Calls may also reference any of the global registers,
1494 so they are recorded as used. */
1495 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
1497 df_uses_record (dflow
, &regno_reg_rtx
[i
],
1498 DF_REF_REG_USE
, bb
, insn
,
/* Call-clobbered registers are recorded as (non-live) clobber defs.  */
1500 EXECUTE_IF_SET_IN_BITMAP (df_invalidated_by_call
, 0, ui
, bi
)
1501 df_ref_record (dflow
, regno_reg_rtx
[ui
], &regno_reg_rtx
[ui
], bb
, insn
,
1502 DF_REF_REG_DEF
, DF_REF_CLOBBER
, false);
1506 /* Record the register uses. */
1507 df_uses_record (dflow
, &PATTERN (insn
),
1508 DF_REF_REG_USE
, bb
, insn
, 0);
/* Return true if any predecessor edge of BB is an exception (EH)
   edge.  NOTE(review): the return statements are missing from this
   extraction; presumably returns true on the first EDGE_EH hit and
   false after the loop -- verify against the original file.  */
1514 df_has_eh_preds (basic_block bb
)
1519 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1521 if (e
->flags
& EDGE_EH
)
1527 /* Record all the refs within the basic block BB. */
/* Allocates per-block scan info on demand, scans each insn in order
   (assigning LUIDs), then adds artificial refs: EH-handler data-reg
   defs and hard-reg uses at the top of EH receiver blocks, and
   forced-live pointer registers for ordinary blocks.
   NOTE(review): extraction is garbled -- braces, some guards (e.g.
   the INSN_P test inside the insn loop) and blank lines are missing;
   code left byte-identical.  */
1530 df_bb_refs_record (struct dataflow
*dflow
, basic_block bb
)
1532 struct df
*df
= dflow
->df
;
1535 struct df_scan_bb_info
*bb_info
= df_scan_get_bb_info (dflow
, bb
->index
);
1537 /* Need to make sure that there is a record in the basic block info. */
1540 bb_info
= (struct df_scan_bb_info
*) pool_alloc (dflow
->block_pool
);
1541 df_scan_set_bb_info (dflow
, bb
->index
, bb_info
);
1542 bb_info
->artificial_defs
= NULL
;
1543 bb_info
->artificial_uses
= NULL
;
1546 /* Scan the block an insn at a time from beginning to end. */
1547 FOR_BB_INSNS (bb
, insn
)
1549 df_insn_create_insn_record (dflow
, insn
)
;
1552 /* Record defs within INSN. */
1553 DF_INSN_LUID (df
, insn
) = luid
++;
1554 df_insn_refs_record (dflow
, bb
, insn
);
1556 DF_INSN_LUID (df
, insn
) = luid
;
1559 #ifdef EH_RETURN_DATA_REGNO
1560 if ((df
->flags
& DF_HARD_REGS
)
1561 && df_has_eh_preds (bb
))
1564 /* Mark the registers that will contain data for the handler. */
1565 if (current_function_calls_eh_return
)
1568 unsigned regno
= EH_RETURN_DATA_REGNO (i
);
1569 if (regno
== INVALID_REGNUM
)
1571 df_ref_record (dflow
, regno_reg_rtx
[i
], &regno_reg_rtx
[i
], bb
, NULL
,
1572 DF_REF_REG_DEF
, DF_REF_ARTIFICIAL
| DF_REF_AT_TOP
, false);
1578 /* This code is putting in a artificial ref for the use at the TOP
1579 of the block that receives the exception. It is too cumbersome
1580 to actually put the ref on the edge. We could either model this
1581 at the top of the receiver block or the bottom of the sender
1584 The bottom of the sender block is problematic because not all
1585 out-edges of the a block are eh-edges. However, it is true that
1586 all edges into a block are either eh-edges or none of them are
1587 eh-edges. Thus, we can model this at the top of the eh-receiver
1588 for all of the edges at once. */
1589 if ((df
->flags
& DF_HARD_REGS
)
1590 && df_has_eh_preds (bb
))
1593 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
1595 df_uses_record (dflow
, &regno_reg_rtx
[i
],
1596 DF_REF_REG_USE
, EXIT_BLOCK_PTR
, NULL
,
1597 DF_REF_ARTIFICIAL
| DF_REF_AT_TOP
);
/* Real (non-entry/exit) blocks: force certain pointer registers
   live so liveness is conservative.  */
1601 if ((df
->flags
& DF_HARD_REGS
)
1602 && bb
->index
>= NUM_FIXED_BLOCKS
)
1604 /* Before reload, there are a few registers that must be forced
1605 live everywhere -- which might not already be the case for
1606 blocks within infinite loops. */
1607 if (! reload_completed
)
1610 /* Any reference to any pseudo before reload is a potential
1611 reference of the frame pointer. */
1612 df_uses_record (dflow
, &regno_reg_rtx
[FRAME_POINTER_REGNUM
],
1613 DF_REF_REG_USE
, bb
, NULL
, DF_REF_ARTIFICIAL
);
1615 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1616 /* Pseudos with argument area equivalences may require
1617 reloading via the argument pointer. */
1618 if (fixed_regs
[ARG_POINTER_REGNUM
])
1619 df_uses_record (dflow
, &regno_reg_rtx
[ARG_POINTER_REGNUM
],
1620 DF_REF_REG_USE
, bb
, NULL
,
1624 /* Any constant, or pseudo with constant equivalences, may
1625 require reloading from memory using the pic register. */
1626 if ((unsigned) PIC_OFFSET_TABLE_REGNUM
!= INVALID_REGNUM
1627 && fixed_regs
[PIC_OFFSET_TABLE_REGNUM
])
1628 df_uses_record (dflow
, &regno_reg_rtx
[PIC_OFFSET_TABLE_REGNUM
],
1629 DF_REF_REG_USE
, bb
, NULL
,
1632 /* The all-important stack pointer must always be live. */
1633 df_uses_record (dflow
, &regno_reg_rtx
[STACK_POINTER_REGNUM
],
1634 DF_REF_REG_USE
, bb
, NULL
, DF_REF_ARTIFICIAL
);
1639 /* Record all the refs in the basic blocks specified by BLOCKS. */
/* DFLOW is the dataflow instance; BLOCKS is a bitmap of basic-block
   indices.  Scans each listed block, and if the exit block is among
   them, also records the exit-block hard-register uses.  */
1642 df_refs_record (struct dataflow
*dflow
, bitmap blocks
)
1644 unsigned int bb_index
;
1647 EXECUTE_IF_SET_IN_BITMAP (blocks
, 0, bb_index
, bi
)
1649 basic_block bb
= BASIC_BLOCK (bb_index
);
1650 df_bb_refs_record (dflow
, bb
);
1653 if (bitmap_bit_p (blocks
, EXIT_BLOCK
))
1654 df_record_exit_block_uses (dflow
);
1658 /*----------------------------------------------------------------------------
1659 Specialized hard register scanning functions.
1660 ----------------------------------------------------------------------------*/
1662 /* Mark a register in SET. Hard registers in large modes get all
1663 of their component registers set as well. */
/* VSET is really a bitmap (the void* signature matches the
   diddle_return_value callback contract).  BLKmode registers are not
   allowed.  NOTE(review): the loop that walks the N component hard
   regs is missing from this extraction -- only its body's
   bitmap_set_bit is visible; verify against the original file.  */
1666 df_mark_reg (rtx reg
, void *vset
)
1668 bitmap set
= (bitmap
) vset
;
1669 int regno
= REGNO (reg
);
1671 gcc_assert (GET_MODE (reg
) != BLKmode
);
1673 bitmap_set_bit (set
, regno
);
/* Hard registers may span several machine regs in wide modes.  */
1674 if (regno
< FIRST_PSEUDO_REGISTER
)
1676 int n
= hard_regno_nregs
[regno
][GET_MODE (reg
)];
1678 bitmap_set_bit (set
, regno
+ n
);
1682 /* Record the set of hard registers that are used in the exit block. */
/* Rebuilds df->exit_block_uses from scratch: stack pointer, frame
   pointer(s), PIC register, global and epilogue-used registers,
   call-saved registers actually used, EH return data/stackadj/handler
   registers, and the function return value.  When DF_HARD_REGS is
   set, each collected register is also recorded as an artificial use
   in the exit block.  Does nothing beyond the clear when DF_HARD_REGS
   is not requested.
   NOTE(review): extraction is garbled -- braces, #endif lines and an
   early return are missing from the visible text; code left
   byte-identical.  */
1685 df_record_exit_block_uses (struct dataflow
*dflow
)
1689 struct df
*df
= dflow
->df
;
1691 bitmap_clear (df
->exit_block_uses
);
1693 if (! (df
->flags
& DF_HARD_REGS
))
1696 /* If exiting needs the right stack value, consider the stack
1697 pointer live at the end of the function. */
1698 if ((HAVE_epilogue
&& epilogue_completed
)
1699 || ! EXIT_IGNORE_STACK
1700 || (! FRAME_POINTER_REQUIRED
1701 && ! current_function_calls_alloca
1702 && flag_omit_frame_pointer
)
1703 || current_function_sp_is_unchanging
)
1705 bitmap_set_bit (df
->exit_block_uses
, STACK_POINTER_REGNUM
);
1708 /* Mark the frame pointer if needed at the end of the function.
1709 If we end up eliminating it, it will be removed from the live
1710 list of each basic block by reload. */
1712 if (! reload_completed
|| frame_pointer_needed
)
1714 bitmap_set_bit (df
->exit_block_uses
, FRAME_POINTER_REGNUM
);
1715 #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
1716 /* If they are different, also mark the hard frame pointer as live. */
1717 if (! LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM
))
1718 bitmap_set_bit (df
->exit_block_uses
, HARD_FRAME_POINTER_REGNUM
);
1722 #ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
1723 /* Many architectures have a GP register even without flag_pic.
1724 Assume the pic register is not in use, or will be handled by
1725 other means, if it is not fixed. */
1726 if ((unsigned) PIC_OFFSET_TABLE_REGNUM
!= INVALID_REGNUM
1727 && fixed_regs
[PIC_OFFSET_TABLE_REGNUM
])
1728 bitmap_set_bit (df
->exit_block_uses
, PIC_OFFSET_TABLE_REGNUM
);
1731 /* Mark all global registers, and all registers used by the
1732 epilogue as being live at the end of the function since they
1733 may be referenced by our caller. */
1734 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
1735 if (global_regs
[i
] || EPILOGUE_USES (i
))
1736 bitmap_set_bit (df
->exit_block_uses
, i
);
1738 if (HAVE_epilogue
&& epilogue_completed
)
1740 /* Mark all call-saved registers that we actually used. */
1741 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
1742 if (regs_ever_live
[i
] && ! LOCAL_REGNO (i
)
1743 && ! TEST_HARD_REG_BIT (regs_invalidated_by_call
, i
))
1744 bitmap_set_bit (df
->exit_block_uses
, i
);
1747 #ifdef EH_RETURN_DATA_REGNO
1748 /* Mark the registers that will contain data for the handler. */
1749 if (reload_completed
&& current_function_calls_eh_return
)
1752 unsigned regno
= EH_RETURN_DATA_REGNO (i
);
1753 if (regno
== INVALID_REGNUM
)
1755 bitmap_set_bit (df
->exit_block_uses
, regno
);
1759 #ifdef EH_RETURN_STACKADJ_RTX
/* Before the epilogue is in place, the EH stack-adjust register is
   still live at exit for eh_return functions.  */
1760 if ((! HAVE_epilogue
|| ! epilogue_completed
)
1761 && current_function_calls_eh_return
)
1763 rtx tmp
= EH_RETURN_STACKADJ_RTX
;
1764 if (tmp
&& REG_P (tmp
))
1765 df_mark_reg (tmp
, df
->exit_block_uses
);
1769 #ifdef EH_RETURN_HANDLER_RTX
1770 if ((! HAVE_epilogue
|| ! epilogue_completed
)
1771 && current_function_calls_eh_return
)
1773 rtx tmp
= EH_RETURN_HANDLER_RTX
;
1774 if (tmp
&& REG_P (tmp
))
1775 df_mark_reg (tmp
, df
->exit_block_uses
);
1779 /* Mark function return value. */
1780 diddle_return_value (df_mark_reg
, (void*) df
->exit_block_uses
);
/* Turn every collected register into an artificial use ref in the
   exit block so downstream dataflow problems see them.  */
1782 if (df
->flags
& DF_HARD_REGS
)
1783 EXECUTE_IF_SET_IN_BITMAP (df
->exit_block_uses
, 0, i
, bi
)
1784 df_uses_record (dflow
, &regno_reg_rtx
[i
],
1785 DF_REF_REG_USE
, EXIT_BLOCK_PTR
, NULL
,
/* presumably guards the one-time platform setup in df_hard_reg_init
   below -- TODO confirm where it is set, as that code is not fully
   visible in this chunk.  */
1789 static bool initialized
= false;
1791 /* Initialize some platform specific structures. */
1794 df_hard_reg_init (void)
1797 #ifdef ELIMINABLE_REGS
1798 static const struct {const int from
, to
; } eliminables
[] = ELIMINABLE_REGS
;
1800 /* After reload, some ports add certain bits to regs_ever_live so
1801 this cannot be reset. */
1803 if (!reload_completed
)
1804 memset (regs_ever_live
, 0, sizeof (regs_ever_live
));
1809 bitmap_obstack_initialize (&persistent_obstack
);
1811 /* Record which registers will be eliminated. We use this in
1813 CLEAR_HARD_REG_SET (elim_reg_set
);
1815 #ifdef ELIMINABLE_REGS
1816 for (i
= 0; i
< (int) ARRAY_SIZE (eliminables
); i
++)
1817 SET_HARD_REG_BIT (elim_reg_set
, eliminables
[i
].from
);
1819 SET_HARD_REG_BIT (elim_reg_set
, FRAME_POINTER_REGNUM
);
1822 df_invalidated_by_call
= BITMAP_ALLOC (&persistent_obstack
);
1824 /* Inconveniently, this is only readily available in hard reg set
1826 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; ++i
)
1827 if (TEST_HARD_REG_BIT (regs_invalidated_by_call
, i
))
1828 bitmap_set_bit (df_invalidated_by_call
, i
);
1830 df_all_hard_regs
= BITMAP_ALLOC (&persistent_obstack
);
1832 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
1833 bitmap_set_bit (df_all_hard_regs
, i
);