1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2016 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
22 #include "coretypes.h"
27 #include "tree-pass.h"
31 #include "stor-layout.h"
33 #include "dwarf2out.h"
34 #include "dwarf2asm.h"
35 #include "common/common-target.h"
37 #include "except.h" /* expand_builtin_dwarf_sp_column */
38 #include "expr.h" /* init_return_column_size */
39 #include "output.h" /* asm_out_file */
40 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
43 /* ??? Poison these here until it can be done generically. They've been
44 totally replaced in this file; make sure it stays that way. */
/* ??? Poison these here until it can be done generically.  They've been
   totally replaced in this file; make sure it stays that way.  */
#undef DWARF2_UNWIND_INFO
#undef DWARF2_FRAME_INFO
#if (GCC_VERSION >= 3000)
 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
#endif

/* Targets that lay out their own return-address save must define this;
   reaching the default is a bug.  */
#ifndef INCOMING_RETURN_ADDR_RTX
#define INCOMING_RETURN_ADDR_RTX  (gcc_unreachable (), NULL_RTX)
#endif

/* Maximum size (in bytes) of an artificially generated label.  */
#define MAX_ARTIFICIAL_LABEL_BYTES	30
58 /* A collected description of an entire row of the abstract CFI table. */
59 struct GTY(()) dw_cfi_row
61 /* The expression that computes the CFA, expressed in two different ways.
62 The CFA member for the simple cases, and the full CFI expression for
63 the complex cases. The later will be a DW_CFA_cfa_expression. */
67 /* The expressions for any register column that is saved. */
71 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
72 struct GTY(()) reg_saved_in_data
{
78 /* Since we no longer have a proper CFG, we're going to create a facsimile
79 of one on the fly while processing the frame-related insns.
81 We create dw_trace_info structures for each extended basic block beginning
82 and ending at a "save point". Save points are labels, barriers, certain
83 notes, and of course the beginning and end of the function.
85 As we encounter control transfer insns, we propagate the "current"
86 row state across the edges to the starts of traces. When checking is
87 enabled, we validate that we propagate the same data from all sources.
89 All traces are members of the TRACE_INFO array, in the order in which
90 they appear in the instruction stream.
92 All save points are present in the TRACE_INDEX hash, mapping the insn
93 starting a trace to the dw_trace_info describing the trace. */
97 /* The insn that begins the trace. */
100 /* The row state at the beginning and end of the trace. */
101 dw_cfi_row
*beg_row
, *end_row
;
103 /* Tracking for DW_CFA_GNU_args_size. The "true" sizes are those we find
104 while scanning insns. However, the args_size value is irrelevant at
105 any point except can_throw_internal_p insns. Therefore the "delay"
106 sizes the values that must actually be emitted for this trace. */
107 HOST_WIDE_INT beg_true_args_size
, end_true_args_size
;
108 HOST_WIDE_INT beg_delay_args_size
, end_delay_args_size
;
110 /* The first EH insn in the trace, where beg_delay_args_size must be set. */
113 /* The following variables contain data used in interpreting frame related
114 expressions. These are not part of the "real" row state as defined by
115 Dwarf, but it seems like they need to be propagated into a trace in case
116 frame related expressions have been sunk. */
117 /* ??? This seems fragile. These variables are fragments of a larger
118 expression. If we do not keep the entire expression together, we risk
119 not being able to put it together properly. Consider forcing targets
120 to generate self-contained expressions and dropping all of the magic
121 interpretation code in this file. Or at least refusing to shrink wrap
122 any frame related insn that doesn't contain a complete expression. */
124 /* The register used for saving registers to the stack, and its offset
126 dw_cfa_location cfa_store
;
128 /* A temporary register holding an integral value used in adjusting SP
129 or setting up the store_reg. The "offset" field holds the integer
130 value, not an offset. */
131 dw_cfa_location cfa_temp
;
133 /* A set of registers saved in other registers. This is the inverse of
134 the row->reg_save info, if the entry is a DW_CFA_register. This is
135 implemented as a flat array because it normally contains zero or 1
136 entry, depending on the target. IA-64 is the big spender here, using
137 a maximum of 5 entries. */
138 vec
<reg_saved_in_data
> regs_saved_in_regs
;
140 /* An identifier for this trace. Used only for debugging dumps. */
143 /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS. */
144 bool switch_sections
;
146 /* True if we've seen different values incoming to beg_true_args_size. */
147 bool args_size_undefined
;
151 /* Hashtable helpers. */
153 struct trace_info_hasher
: nofree_ptr_hash
<dw_trace_info
>
155 static inline hashval_t
hash (const dw_trace_info
*);
156 static inline bool equal (const dw_trace_info
*, const dw_trace_info
*);
160 trace_info_hasher::hash (const dw_trace_info
*ti
)
162 return INSN_UID (ti
->head
);
166 trace_info_hasher::equal (const dw_trace_info
*a
, const dw_trace_info
*b
)
168 return a
->head
== b
->head
;
172 /* The variables making up the pseudo-cfg, as described above. */
173 static vec
<dw_trace_info
> trace_info
;
174 static vec
<dw_trace_info
*> trace_work_list
;
175 static hash_table
<trace_info_hasher
> *trace_index
;
177 /* A vector of call frame insns for the CIE. */
180 /* The state of the first row of the FDE table, which includes the
181 state provided by the CIE. */
182 static GTY(()) dw_cfi_row
*cie_cfi_row
;
184 static GTY(()) reg_saved_in_data
*cie_return_save
;
186 static GTY(()) unsigned long dwarf2out_cfi_label_num
;
188 /* The insn after which a new CFI note should be emitted. */
189 static rtx_insn
*add_cfi_insn
;
191 /* When non-null, add_cfi will add the CFI to this vector. */
192 static cfi_vec
*add_cfi_vec
;
194 /* The current instruction trace. */
195 static dw_trace_info
*cur_trace
;
197 /* The current, i.e. most recently generated, row of the CFI table. */
198 static dw_cfi_row
*cur_row
;
200 /* A copy of the current CFA, for use during the processing of a
202 static dw_cfa_location
*cur_cfa
;
204 /* We delay emitting a register save until either (a) we reach the end
205 of the prologue or (b) the register is clobbered. This clusters
206 register saves so that there are fewer pc advances. */
208 struct queued_reg_save
{
211 HOST_WIDE_INT cfa_offset
;
215 static vec
<queued_reg_save
> queued_reg_saves
;
217 /* True if any CFI directives were emitted at the current insn. */
218 static bool any_cfis_emitted
;
220 /* Short-hand for commonly used register numbers. */
221 static unsigned dw_stack_pointer_regnum
;
222 static unsigned dw_frame_pointer_regnum
;
224 /* Hook used by __throw. */
227 expand_builtin_dwarf_sp_column (void)
229 unsigned int dwarf_regnum
= DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM
);
230 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum
, 1));
233 /* MEM is a memory reference for the register size table, each element of
234 which has mode MODE. Initialize column C as a return address column. */
237 init_return_column_size (machine_mode mode
, rtx mem
, unsigned int c
)
239 HOST_WIDE_INT offset
= c
* GET_MODE_SIZE (mode
);
240 HOST_WIDE_INT size
= GET_MODE_SIZE (Pmode
);
241 emit_move_insn (adjust_address (mem
, mode
, offset
),
242 gen_int_mode (size
, mode
));
245 /* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
246 init_one_dwarf_reg_size to communicate on what has been done by the
249 struct init_one_dwarf_reg_state
251 /* Whether the dwarf return column was initialized. */
252 bool wrote_return_column
;
254 /* For each hard register REGNO, whether init_one_dwarf_reg_size
255 was given REGNO to process already. */
256 bool processed_regno
[FIRST_PSEUDO_REGISTER
];
260 /* Helper for expand_builtin_init_dwarf_reg_sizes. Generate code to
261 initialize the dwarf register size table entry corresponding to register
262 REGNO in REGMODE. TABLE is the table base address, SLOTMODE is the mode to
263 use for the size entry to initialize, and INIT_STATE is the communication
264 datastructure conveying what we're doing to our caller. */
267 void init_one_dwarf_reg_size (int regno
, machine_mode regmode
,
268 rtx table
, machine_mode slotmode
,
269 init_one_dwarf_reg_state
*init_state
)
271 const unsigned int dnum
= DWARF_FRAME_REGNUM (regno
);
272 const unsigned int rnum
= DWARF2_FRAME_REG_OUT (dnum
, 1);
273 const unsigned int dcol
= DWARF_REG_TO_UNWIND_COLUMN (rnum
);
275 const HOST_WIDE_INT slotoffset
= dcol
* GET_MODE_SIZE (slotmode
);
276 const HOST_WIDE_INT regsize
= GET_MODE_SIZE (regmode
);
278 init_state
->processed_regno
[regno
] = true;
280 if (rnum
>= DWARF_FRAME_REGISTERS
)
283 if (dnum
== DWARF_FRAME_RETURN_COLUMN
)
285 if (regmode
== VOIDmode
)
287 init_state
->wrote_return_column
= true;
293 emit_move_insn (adjust_address (table
, slotmode
, slotoffset
),
294 gen_int_mode (regsize
, slotmode
));
297 /* Generate code to initialize the dwarf register size table located
298 at the provided ADDRESS. */
301 expand_builtin_init_dwarf_reg_sizes (tree address
)
304 machine_mode mode
= TYPE_MODE (char_type_node
);
305 rtx addr
= expand_normal (address
);
306 rtx mem
= gen_rtx_MEM (BLKmode
, addr
);
308 init_one_dwarf_reg_state init_state
;
310 memset ((char *)&init_state
, 0, sizeof (init_state
));
312 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
314 machine_mode save_mode
;
317 /* No point in processing a register multiple times. This could happen
318 with register spans, e.g. when a reg is first processed as a piece of
319 a span, then as a register on its own later on. */
321 if (init_state
.processed_regno
[i
])
324 save_mode
= targetm
.dwarf_frame_reg_mode (i
);
325 span
= targetm
.dwarf_register_span (gen_rtx_REG (save_mode
, i
));
328 init_one_dwarf_reg_size (i
, save_mode
, mem
, mode
, &init_state
);
331 for (int si
= 0; si
< XVECLEN (span
, 0); si
++)
333 rtx reg
= XVECEXP (span
, 0, si
);
335 init_one_dwarf_reg_size
336 (REGNO (reg
), GET_MODE (reg
), mem
, mode
, &init_state
);
341 if (!init_state
.wrote_return_column
)
342 init_return_column_size (mode
, mem
, DWARF_FRAME_RETURN_COLUMN
);
344 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
345 init_return_column_size (mode
, mem
, DWARF_ALT_FRAME_RETURN_COLUMN
);
348 targetm
.init_dwarf_reg_sizes_extra (address
);
352 static dw_trace_info
*
353 get_trace_info (rtx_insn
*insn
)
357 return trace_index
->find_with_hash (&dummy
, INSN_UID (insn
));
361 save_point_p (rtx_insn
*insn
)
363 /* Labels, except those that are really jump tables. */
365 return inside_basic_block_p (insn
);
367 /* We split traces at the prologue/epilogue notes because those
368 are points at which the unwind info is usually stable. This
369 makes it easier to find spots with identical unwind info so
370 that we can use remember/restore_state opcodes. */
372 switch (NOTE_KIND (insn
))
374 case NOTE_INSN_PROLOGUE_END
:
375 case NOTE_INSN_EPILOGUE_BEG
:
382 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
384 static inline HOST_WIDE_INT
385 div_data_align (HOST_WIDE_INT off
)
387 HOST_WIDE_INT r
= off
/ DWARF_CIE_DATA_ALIGNMENT
;
388 gcc_assert (r
* DWARF_CIE_DATA_ALIGNMENT
== off
);
392 /* Return true if we need a signed version of a given opcode
393 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
396 need_data_align_sf_opcode (HOST_WIDE_INT off
)
398 return DWARF_CIE_DATA_ALIGNMENT
< 0 ? off
> 0 : off
< 0;
401 /* Return a pointer to a newly allocated Call Frame Instruction. */
403 static inline dw_cfi_ref
406 dw_cfi_ref cfi
= ggc_alloc
<dw_cfi_node
> ();
408 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= 0;
409 cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
= 0;
414 /* Return a newly allocated CFI row, with no defined data. */
419 dw_cfi_row
*row
= ggc_cleared_alloc
<dw_cfi_row
> ();
421 row
->cfa
.reg
= INVALID_REGNUM
;
426 /* Return a copy of an existing CFI row. */
429 copy_cfi_row (dw_cfi_row
*src
)
431 dw_cfi_row
*dst
= ggc_alloc
<dw_cfi_row
> ();
434 dst
->reg_save
= vec_safe_copy (src
->reg_save
);
439 /* Generate a new label for the CFI info to refer to. */
442 dwarf2out_cfi_label (void)
444 int num
= dwarf2out_cfi_label_num
++;
447 ASM_GENERATE_INTERNAL_LABEL (label
, "LCFI", num
);
449 return xstrdup (label
);
452 /* Add CFI either to the current insn stream or to a vector, or both. */
455 add_cfi (dw_cfi_ref cfi
)
457 any_cfis_emitted
= true;
459 if (add_cfi_insn
!= NULL
)
461 add_cfi_insn
= emit_note_after (NOTE_INSN_CFI
, add_cfi_insn
);
462 NOTE_CFI (add_cfi_insn
) = cfi
;
465 if (add_cfi_vec
!= NULL
)
466 vec_safe_push (*add_cfi_vec
, cfi
);
470 add_cfi_args_size (HOST_WIDE_INT size
)
472 dw_cfi_ref cfi
= new_cfi ();
474 /* While we can occasionally have args_size < 0 internally, this state
475 should not persist at a point we actually need an opcode. */
476 gcc_assert (size
>= 0);
478 cfi
->dw_cfi_opc
= DW_CFA_GNU_args_size
;
479 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
= size
;
485 add_cfi_restore (unsigned reg
)
487 dw_cfi_ref cfi
= new_cfi ();
489 cfi
->dw_cfi_opc
= (reg
& ~0x3f ? DW_CFA_restore_extended
: DW_CFA_restore
);
490 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
495 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
496 that the register column is no longer saved. */
499 update_row_reg_save (dw_cfi_row
*row
, unsigned column
, dw_cfi_ref cfi
)
501 if (vec_safe_length (row
->reg_save
) <= column
)
502 vec_safe_grow_cleared (row
->reg_save
, column
+ 1);
503 (*row
->reg_save
)[column
] = cfi
;
506 /* This function fills in aa dw_cfa_location structure from a dwarf location
507 descriptor sequence. */
510 get_cfa_from_loc_descr (dw_cfa_location
*cfa
, struct dw_loc_descr_node
*loc
)
512 struct dw_loc_descr_node
*ptr
;
514 cfa
->base_offset
= 0;
518 for (ptr
= loc
; ptr
!= NULL
; ptr
= ptr
->dw_loc_next
)
520 enum dwarf_location_atom op
= ptr
->dw_loc_opc
;
556 cfa
->reg
= op
- DW_OP_reg0
;
559 cfa
->reg
= ptr
->dw_loc_oprnd1
.v
.val_int
;
593 cfa
->reg
= op
- DW_OP_breg0
;
594 cfa
->base_offset
= ptr
->dw_loc_oprnd1
.v
.val_int
;
597 cfa
->reg
= ptr
->dw_loc_oprnd1
.v
.val_int
;
598 cfa
->base_offset
= ptr
->dw_loc_oprnd2
.v
.val_int
;
603 case DW_OP_plus_uconst
:
604 cfa
->offset
= ptr
->dw_loc_oprnd1
.v
.val_unsigned
;
612 /* Find the previous value for the CFA, iteratively. CFI is the opcode
613 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
614 one level of remember/restore state processing. */
617 lookup_cfa_1 (dw_cfi_ref cfi
, dw_cfa_location
*loc
, dw_cfa_location
*remember
)
619 switch (cfi
->dw_cfi_opc
)
621 case DW_CFA_def_cfa_offset
:
622 case DW_CFA_def_cfa_offset_sf
:
623 loc
->offset
= cfi
->dw_cfi_oprnd1
.dw_cfi_offset
;
625 case DW_CFA_def_cfa_register
:
626 loc
->reg
= cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
;
629 case DW_CFA_def_cfa_sf
:
630 loc
->reg
= cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
;
631 loc
->offset
= cfi
->dw_cfi_oprnd2
.dw_cfi_offset
;
633 case DW_CFA_def_cfa_expression
:
634 get_cfa_from_loc_descr (loc
, cfi
->dw_cfi_oprnd1
.dw_cfi_loc
);
637 case DW_CFA_remember_state
:
638 gcc_assert (!remember
->in_use
);
640 remember
->in_use
= 1;
642 case DW_CFA_restore_state
:
643 gcc_assert (remember
->in_use
);
645 remember
->in_use
= 0;
653 /* Determine if two dw_cfa_location structures define the same data. */
656 cfa_equal_p (const dw_cfa_location
*loc1
, const dw_cfa_location
*loc2
)
658 return (loc1
->reg
== loc2
->reg
659 && loc1
->offset
== loc2
->offset
660 && loc1
->indirect
== loc2
->indirect
661 && (loc1
->indirect
== 0
662 || loc1
->base_offset
== loc2
->base_offset
));
665 /* Determine if two CFI operands are identical. */
668 cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t
, dw_cfi_oprnd
*a
, dw_cfi_oprnd
*b
)
672 case dw_cfi_oprnd_unused
:
674 case dw_cfi_oprnd_reg_num
:
675 return a
->dw_cfi_reg_num
== b
->dw_cfi_reg_num
;
676 case dw_cfi_oprnd_offset
:
677 return a
->dw_cfi_offset
== b
->dw_cfi_offset
;
678 case dw_cfi_oprnd_addr
:
679 return (a
->dw_cfi_addr
== b
->dw_cfi_addr
680 || strcmp (a
->dw_cfi_addr
, b
->dw_cfi_addr
) == 0);
681 case dw_cfi_oprnd_loc
:
682 return loc_descr_equal_p (a
->dw_cfi_loc
, b
->dw_cfi_loc
);
687 /* Determine if two CFI entries are identical. */
690 cfi_equal_p (dw_cfi_ref a
, dw_cfi_ref b
)
692 enum dwarf_call_frame_info opc
;
694 /* Make things easier for our callers, including missing operands. */
697 if (a
== NULL
|| b
== NULL
)
700 /* Obviously, the opcodes must match. */
702 if (opc
!= b
->dw_cfi_opc
)
705 /* Compare the two operands, re-using the type of the operands as
706 already exposed elsewhere. */
707 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc
),
708 &a
->dw_cfi_oprnd1
, &b
->dw_cfi_oprnd1
)
709 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc
),
710 &a
->dw_cfi_oprnd2
, &b
->dw_cfi_oprnd2
));
713 /* Determine if two CFI_ROW structures are identical. */
716 cfi_row_equal_p (dw_cfi_row
*a
, dw_cfi_row
*b
)
718 size_t i
, n_a
, n_b
, n_max
;
722 if (!cfi_equal_p (a
->cfa_cfi
, b
->cfa_cfi
))
725 else if (!cfa_equal_p (&a
->cfa
, &b
->cfa
))
728 n_a
= vec_safe_length (a
->reg_save
);
729 n_b
= vec_safe_length (b
->reg_save
);
730 n_max
= MAX (n_a
, n_b
);
732 for (i
= 0; i
< n_max
; ++i
)
734 dw_cfi_ref r_a
= NULL
, r_b
= NULL
;
737 r_a
= (*a
->reg_save
)[i
];
739 r_b
= (*b
->reg_save
)[i
];
741 if (!cfi_equal_p (r_a
, r_b
))
748 /* The CFA is now calculated from NEW_CFA. Consider OLD_CFA in determining
749 what opcode to emit. Returns the CFI opcode to effect the change, or
750 NULL if NEW_CFA == OLD_CFA. */
753 def_cfa_0 (dw_cfa_location
*old_cfa
, dw_cfa_location
*new_cfa
)
757 /* If nothing changed, no need to issue any call frame instructions. */
758 if (cfa_equal_p (old_cfa
, new_cfa
))
763 if (new_cfa
->reg
== old_cfa
->reg
&& !new_cfa
->indirect
&& !old_cfa
->indirect
)
765 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
766 the CFA register did not change but the offset did. The data
767 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
768 in the assembler via the .cfi_def_cfa_offset directive. */
769 if (new_cfa
->offset
< 0)
770 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_offset_sf
;
772 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_offset
;
773 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
= new_cfa
->offset
;
775 else if (new_cfa
->offset
== old_cfa
->offset
776 && old_cfa
->reg
!= INVALID_REGNUM
777 && !new_cfa
->indirect
778 && !old_cfa
->indirect
)
780 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
781 indicating the CFA register has changed to <register> but the
782 offset has not changed. */
783 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_register
;
784 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= new_cfa
->reg
;
786 else if (new_cfa
->indirect
== 0)
788 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
789 indicating the CFA register has changed to <register> with
790 the specified offset. The data factoring for DW_CFA_def_cfa_sf
791 happens in output_cfi, or in the assembler via the .cfi_def_cfa
793 if (new_cfa
->offset
< 0)
794 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_sf
;
796 cfi
->dw_cfi_opc
= DW_CFA_def_cfa
;
797 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= new_cfa
->reg
;
798 cfi
->dw_cfi_oprnd2
.dw_cfi_offset
= new_cfa
->offset
;
802 /* Construct a DW_CFA_def_cfa_expression instruction to
803 calculate the CFA using a full location expression since no
804 register-offset pair is available. */
805 struct dw_loc_descr_node
*loc_list
;
807 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_expression
;
808 loc_list
= build_cfa_loc (new_cfa
, 0);
809 cfi
->dw_cfi_oprnd1
.dw_cfi_loc
= loc_list
;
815 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
818 def_cfa_1 (dw_cfa_location
*new_cfa
)
822 if (cur_trace
->cfa_store
.reg
== new_cfa
->reg
&& new_cfa
->indirect
== 0)
823 cur_trace
->cfa_store
.offset
= new_cfa
->offset
;
825 cfi
= def_cfa_0 (&cur_row
->cfa
, new_cfa
);
828 cur_row
->cfa
= *new_cfa
;
829 cur_row
->cfa_cfi
= (cfi
->dw_cfi_opc
== DW_CFA_def_cfa_expression
836 /* Add the CFI for saving a register. REG is the CFA column number.
837 If SREG is -1, the register is saved at OFFSET from the CFA;
838 otherwise it is saved in SREG. */
841 reg_save (unsigned int reg
, unsigned int sreg
, HOST_WIDE_INT offset
)
843 dw_fde_ref fde
= cfun
? cfun
->fde
: NULL
;
844 dw_cfi_ref cfi
= new_cfi ();
846 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
848 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
850 && fde
->stack_realign
851 && sreg
== INVALID_REGNUM
)
853 cfi
->dw_cfi_opc
= DW_CFA_expression
;
854 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
855 cfi
->dw_cfi_oprnd2
.dw_cfi_loc
856 = build_cfa_aligned_loc (&cur_row
->cfa
, offset
,
857 fde
->stack_realignment
);
859 else if (sreg
== INVALID_REGNUM
)
861 if (need_data_align_sf_opcode (offset
))
862 cfi
->dw_cfi_opc
= DW_CFA_offset_extended_sf
;
863 else if (reg
& ~0x3f)
864 cfi
->dw_cfi_opc
= DW_CFA_offset_extended
;
866 cfi
->dw_cfi_opc
= DW_CFA_offset
;
867 cfi
->dw_cfi_oprnd2
.dw_cfi_offset
= offset
;
869 else if (sreg
== reg
)
871 /* While we could emit something like DW_CFA_same_value or
872 DW_CFA_restore, we never expect to see something like that
873 in a prologue. This is more likely to be a bug. A backend
874 can always bypass this by using REG_CFA_RESTORE directly. */
879 cfi
->dw_cfi_opc
= DW_CFA_register
;
880 cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
= sreg
;
884 update_row_reg_save (cur_row
, reg
, cfi
);
887 /* A subroutine of scan_trace. Check INSN for a REG_ARGS_SIZE note
888 and adjust data structures to match. */
891 notice_args_size (rtx_insn
*insn
)
893 HOST_WIDE_INT args_size
, delta
;
896 note
= find_reg_note (insn
, REG_ARGS_SIZE
, NULL
);
900 args_size
= INTVAL (XEXP (note
, 0));
901 delta
= args_size
- cur_trace
->end_true_args_size
;
905 cur_trace
->end_true_args_size
= args_size
;
907 /* If the CFA is computed off the stack pointer, then we must adjust
908 the computation of the CFA as well. */
909 if (cur_cfa
->reg
== dw_stack_pointer_regnum
)
911 gcc_assert (!cur_cfa
->indirect
);
913 /* Convert a change in args_size (always a positive in the
914 direction of stack growth) to a change in stack pointer. */
915 if (!STACK_GROWS_DOWNWARD
)
918 cur_cfa
->offset
+= delta
;
922 /* A subroutine of scan_trace. INSN is can_throw_internal. Update the
923 data within the trace related to EH insns and args_size. */
926 notice_eh_throw (rtx_insn
*insn
)
928 HOST_WIDE_INT args_size
;
930 args_size
= cur_trace
->end_true_args_size
;
931 if (cur_trace
->eh_head
== NULL
)
933 cur_trace
->eh_head
= insn
;
934 cur_trace
->beg_delay_args_size
= args_size
;
935 cur_trace
->end_delay_args_size
= args_size
;
937 else if (cur_trace
->end_delay_args_size
!= args_size
)
939 cur_trace
->end_delay_args_size
= args_size
;
941 /* ??? If the CFA is the stack pointer, search backward for the last
942 CFI note and insert there. Given that the stack changed for the
943 args_size change, there *must* be such a note in between here and
945 add_cfi_args_size (args_size
);
949 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
950 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
951 used in places where rtl is prohibited. */
953 static inline unsigned
954 dwf_regno (const_rtx reg
)
956 gcc_assert (REGNO (reg
) < FIRST_PSEUDO_REGISTER
);
957 return DWARF_FRAME_REGNUM (REGNO (reg
));
960 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
963 compare_reg_or_pc (rtx x
, rtx y
)
965 if (REG_P (x
) && REG_P (y
))
966 return REGNO (x
) == REGNO (y
);
970 /* Record SRC as being saved in DEST. DEST may be null to delete an
971 existing entry. SRC may be a register or PC_RTX. */
974 record_reg_saved_in_reg (rtx dest
, rtx src
)
976 reg_saved_in_data
*elt
;
979 FOR_EACH_VEC_ELT (cur_trace
->regs_saved_in_regs
, i
, elt
)
980 if (compare_reg_or_pc (elt
->orig_reg
, src
))
983 cur_trace
->regs_saved_in_regs
.unordered_remove (i
);
985 elt
->saved_in_reg
= dest
;
992 reg_saved_in_data e
= {src
, dest
};
993 cur_trace
->regs_saved_in_regs
.safe_push (e
);
996 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
997 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1000 queue_reg_save (rtx reg
, rtx sreg
, HOST_WIDE_INT offset
)
1003 queued_reg_save e
= {reg
, sreg
, offset
};
1006 /* Duplicates waste space, but it's also necessary to remove them
1007 for correctness, since the queue gets output in reverse order. */
1008 FOR_EACH_VEC_ELT (queued_reg_saves
, i
, q
)
1009 if (compare_reg_or_pc (q
->reg
, reg
))
1015 queued_reg_saves
.safe_push (e
);
1018 /* Output all the entries in QUEUED_REG_SAVES. */
1021 dwarf2out_flush_queued_reg_saves (void)
1026 FOR_EACH_VEC_ELT (queued_reg_saves
, i
, q
)
1028 unsigned int reg
, sreg
;
1030 record_reg_saved_in_reg (q
->saved_reg
, q
->reg
);
1032 if (q
->reg
== pc_rtx
)
1033 reg
= DWARF_FRAME_RETURN_COLUMN
;
1035 reg
= dwf_regno (q
->reg
);
1037 sreg
= dwf_regno (q
->saved_reg
);
1039 sreg
= INVALID_REGNUM
;
1040 reg_save (reg
, sreg
, q
->cfa_offset
);
1043 queued_reg_saves
.truncate (0);
1046 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1047 location for? Or, does it clobber a register which we've previously
1048 said that some other register is saved in, and for which we now
1049 have a new location for? */
1052 clobbers_queued_reg_save (const_rtx insn
)
1057 FOR_EACH_VEC_ELT (queued_reg_saves
, iq
, q
)
1060 reg_saved_in_data
*rir
;
1062 if (modified_in_p (q
->reg
, insn
))
1065 FOR_EACH_VEC_ELT (cur_trace
->regs_saved_in_regs
, ir
, rir
)
1066 if (compare_reg_or_pc (q
->reg
, rir
->orig_reg
)
1067 && modified_in_p (rir
->saved_in_reg
, insn
))
1074 /* What register, if any, is currently saved in REG? */
1077 reg_saved_in (rtx reg
)
1079 unsigned int regn
= REGNO (reg
);
1081 reg_saved_in_data
*rir
;
1084 FOR_EACH_VEC_ELT (queued_reg_saves
, i
, q
)
1085 if (q
->saved_reg
&& regn
== REGNO (q
->saved_reg
))
1088 FOR_EACH_VEC_ELT (cur_trace
->regs_saved_in_regs
, i
, rir
)
1089 if (regn
== REGNO (rir
->saved_in_reg
))
1090 return rir
->orig_reg
;
1095 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1098 dwarf2out_frame_debug_def_cfa (rtx pat
)
1100 memset (cur_cfa
, 0, sizeof (*cur_cfa
));
1102 if (GET_CODE (pat
) == PLUS
)
1104 cur_cfa
->offset
= INTVAL (XEXP (pat
, 1));
1105 pat
= XEXP (pat
, 0);
1109 cur_cfa
->indirect
= 1;
1110 pat
= XEXP (pat
, 0);
1111 if (GET_CODE (pat
) == PLUS
)
1113 cur_cfa
->base_offset
= INTVAL (XEXP (pat
, 1));
1114 pat
= XEXP (pat
, 0);
1117 /* ??? If this fails, we could be calling into the _loc functions to
1118 define a full expression. So far no port does that. */
1119 gcc_assert (REG_P (pat
));
1120 cur_cfa
->reg
= dwf_regno (pat
);
1123 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1126 dwarf2out_frame_debug_adjust_cfa (rtx pat
)
1130 gcc_assert (GET_CODE (pat
) == SET
);
1131 dest
= XEXP (pat
, 0);
1132 src
= XEXP (pat
, 1);
1134 switch (GET_CODE (src
))
1137 gcc_assert (dwf_regno (XEXP (src
, 0)) == cur_cfa
->reg
);
1138 cur_cfa
->offset
-= INTVAL (XEXP (src
, 1));
1148 cur_cfa
->reg
= dwf_regno (dest
);
1149 gcc_assert (cur_cfa
->indirect
== 0);
1152 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1155 dwarf2out_frame_debug_cfa_offset (rtx set
)
1157 HOST_WIDE_INT offset
;
1158 rtx src
, addr
, span
;
1159 unsigned int sregno
;
1161 src
= XEXP (set
, 1);
1162 addr
= XEXP (set
, 0);
1163 gcc_assert (MEM_P (addr
));
1164 addr
= XEXP (addr
, 0);
1166 /* As documented, only consider extremely simple addresses. */
1167 switch (GET_CODE (addr
))
1170 gcc_assert (dwf_regno (addr
) == cur_cfa
->reg
);
1171 offset
= -cur_cfa
->offset
;
1174 gcc_assert (dwf_regno (XEXP (addr
, 0)) == cur_cfa
->reg
);
1175 offset
= INTVAL (XEXP (addr
, 1)) - cur_cfa
->offset
;
1184 sregno
= DWARF_FRAME_RETURN_COLUMN
;
1188 span
= targetm
.dwarf_register_span (src
);
1189 sregno
= dwf_regno (src
);
1192 /* ??? We'd like to use queue_reg_save, but we need to come up with
1193 a different flushing heuristic for epilogues. */
1195 reg_save (sregno
, INVALID_REGNUM
, offset
);
1198 /* We have a PARALLEL describing where the contents of SRC live.
1199 Adjust the offset for each piece of the PARALLEL. */
1200 HOST_WIDE_INT span_offset
= offset
;
1202 gcc_assert (GET_CODE (span
) == PARALLEL
);
1204 const int par_len
= XVECLEN (span
, 0);
1205 for (int par_index
= 0; par_index
< par_len
; par_index
++)
1207 rtx elem
= XVECEXP (span
, 0, par_index
);
1208 sregno
= dwf_regno (src
);
1209 reg_save (sregno
, INVALID_REGNUM
, span_offset
);
1210 span_offset
+= GET_MODE_SIZE (GET_MODE (elem
));
1215 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1218 dwarf2out_frame_debug_cfa_register (rtx set
)
1221 unsigned sregno
, dregno
;
1223 src
= XEXP (set
, 1);
1224 dest
= XEXP (set
, 0);
1226 record_reg_saved_in_reg (dest
, src
);
1228 sregno
= DWARF_FRAME_RETURN_COLUMN
;
1230 sregno
= dwf_regno (src
);
1232 dregno
= dwf_regno (dest
);
1234 /* ??? We'd like to use queue_reg_save, but we need to come up with
1235 a different flushing heuristic for epilogues. */
1236 reg_save (sregno
, dregno
, 0);
1239 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1242 dwarf2out_frame_debug_cfa_expression (rtx set
)
1244 rtx src
, dest
, span
;
1245 dw_cfi_ref cfi
= new_cfi ();
1248 dest
= SET_DEST (set
);
1249 src
= SET_SRC (set
);
1251 gcc_assert (REG_P (src
));
1252 gcc_assert (MEM_P (dest
));
1254 span
= targetm
.dwarf_register_span (src
);
1257 regno
= dwf_regno (src
);
1259 cfi
->dw_cfi_opc
= DW_CFA_expression
;
1260 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= regno
;
1261 cfi
->dw_cfi_oprnd2
.dw_cfi_loc
1262 = mem_loc_descriptor (XEXP (dest
, 0), get_address_mode (dest
),
1263 GET_MODE (dest
), VAR_INIT_STATUS_INITIALIZED
);
1265 /* ??? We'd like to use queue_reg_save, were the interface different,
1266 and, as above, we could manage flushing for epilogues. */
1268 update_row_reg_save (cur_row
, regno
, cfi
);
1271 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1274 dwarf2out_frame_debug_cfa_restore (rtx reg
)
1276 gcc_assert (REG_P (reg
));
1278 rtx span
= targetm
.dwarf_register_span (reg
);
1281 unsigned int regno
= dwf_regno (reg
);
1282 add_cfi_restore (regno
);
1283 update_row_reg_save (cur_row
, regno
, NULL
);
1287 /* We have a PARALLEL describing where the contents of REG live.
1288 Restore the register for each piece of the PARALLEL. */
1289 gcc_assert (GET_CODE (span
) == PARALLEL
);
1291 const int par_len
= XVECLEN (span
, 0);
1292 for (int par_index
= 0; par_index
< par_len
; par_index
++)
1294 reg
= XVECEXP (span
, 0, par_index
);
1295 gcc_assert (REG_P (reg
));
1296 unsigned int regno
= dwf_regno (reg
);
1297 add_cfi_restore (regno
);
1298 update_row_reg_save (cur_row
, regno
, NULL
);
1303 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1304 ??? Perhaps we should note in the CIE where windows are saved (instead of
1305 assuming 0(cfa)) and what registers are in the window. */
1308 dwarf2out_frame_debug_cfa_window_save (void)
1310 dw_cfi_ref cfi
= new_cfi ();
1312 cfi
->dw_cfi_opc
= DW_CFA_GNU_window_save
;
1316 /* Record call frame debugging information for an expression EXPR,
1317 which either sets SP or FP (adjusting how we calculate the frame
1318 address) or saves a register to the stack or another register.
1319 LABEL indicates the address of EXPR.
1321 This function encodes a state machine mapping rtxes to actions on
1322 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1323 users need not read the source code.
1325 The High-Level Picture
1327 Changes in the register we use to calculate the CFA: Currently we
1328 assume that if you copy the CFA register into another register, we
1329 should take the other one as the new CFA register; this seems to
1330 work pretty well. If it's wrong for some target, it's simple
1331 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1333 Changes in the register we use for saving registers to the stack:
1334 This is usually SP, but not always. Again, we deduce that if you
1335 copy SP into another register (and SP is not the CFA register),
1336 then the new register is the one we will be using for register
1337 saves. This also seems to work.
1339 Register saves: There's not much guesswork about this one; if
1340 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1341 register save, and the register used to calculate the destination
1342 had better be the one we think we're using for this purpose.
1343 It's also assumed that a copy from a call-saved register to another
1344 register is saving that register if RTX_FRAME_RELATED_P is set on
1345 that instruction. If the copy is from a call-saved register to
1346 the *same* register, that means that the register is now the same
1347 value as in the caller.
1349 Except: If the register being saved is the CFA register, and the
1350 offset is nonzero, we are saving the CFA, so we assume we have to
1351 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1352 the intent is to save the value of SP from the previous frame.
1354 In addition, if a register has previously been saved to a different
1357 Invariants / Summaries of Rules
1359 cfa current rule for calculating the CFA. It usually
1360 consists of a register and an offset. This is
1361 actually stored in *cur_cfa, but abbreviated
1362 for the purposes of this documentation.
1363 cfa_store register used by prologue code to save things to the stack
1364 cfa_store.offset is the offset from the value of
1365 cfa_store.reg to the actual CFA
1366 cfa_temp register holding an integral value. cfa_temp.offset
1367 stores the value, which will be used to adjust the
1368 stack pointer. cfa_temp is also used like cfa_store,
1369 to track stores to the stack via fp or a temp reg.
1371 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1372 with cfa.reg as the first operand changes the cfa.reg and its
1373 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1376 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1377 expression yielding a constant. This sets cfa_temp.reg
1378 and cfa_temp.offset.
1380 Rule 5: Create a new register cfa_store used to save items to the
1383 Rules 10-14: Save a register to the stack. Define offset as the
1384 difference of the original location and cfa_store's
1385 location (or cfa_temp's location if cfa_temp is used).
1387 Rules 16-20: If AND operation happens on sp in prologue, we assume
1388 stack is realigned. We will use a group of DW_OP_XXX
1389 expressions to represent the location of the stored
1390 register instead of CFA+offset.
1394 "{a,b}" indicates a choice of a xor b.
1395 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1398 (set <reg1> <reg2>:cfa.reg)
1399 effects: cfa.reg = <reg1>
1400 cfa.offset unchanged
1401 cfa_temp.reg = <reg1>
1402 cfa_temp.offset = cfa.offset
1405 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1406 {<const_int>,<reg>:cfa_temp.reg}))
1407 effects: cfa.reg = sp if fp used
1408 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1409 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1410 if cfa_store.reg==sp
1413 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1414 effects: cfa.reg = fp
1415 cfa_offset += +/- <const_int>
1418 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1419 constraints: <reg1> != fp
1421 effects: cfa.reg = <reg1>
1422 cfa_temp.reg = <reg1>
1423 cfa_temp.offset = cfa.offset
1426 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1427 constraints: <reg1> != fp
1429 effects: cfa_store.reg = <reg1>
1430 cfa_store.offset = cfa.offset - cfa_temp.offset
1433 (set <reg> <const_int>)
1434 effects: cfa_temp.reg = <reg>
1435 cfa_temp.offset = <const_int>
1438 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1439 effects: cfa_temp.reg = <reg1>
1440 cfa_temp.offset |= <const_int>
1443 (set <reg> (high <exp>))
1447 (set <reg> (lo_sum <exp> <const_int>))
1448 effects: cfa_temp.reg = <reg>
1449 cfa_temp.offset = <const_int>
1452 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1453 effects: cfa_store.offset -= <const_int>
1454 cfa.offset = cfa_store.offset if cfa.reg == sp
1456 cfa.base_offset = -cfa_store.offset
1459 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1460 effects: cfa_store.offset += -/+ mode_size(mem)
1461 cfa.offset = cfa_store.offset if cfa.reg == sp
1463 cfa.base_offset = -cfa_store.offset
1466 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1469 effects: cfa.reg = <reg1>
1470 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1473 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1474 effects: cfa.reg = <reg1>
1475 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1478 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1479 effects: cfa.reg = <reg1>
1480 cfa.base_offset = -cfa_temp.offset
1481 cfa_temp.offset -= mode_size(mem)
1484 (set <reg> {unspec, unspec_volatile})
1485 effects: target-dependent
1488 (set sp (and: sp <const_int>))
1489 constraints: cfa_store.reg == sp
1490 effects: cfun->fde.stack_realign = 1
1491 cfa_store.offset = 0
1492 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1495 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1496 effects: cfa_store.offset += -/+ mode_size(mem)
1499 (set (mem ({pre_inc, pre_dec} sp)) fp)
1500 constraints: fde->stack_realign == 1
1501 effects: cfa_store.offset = 0
1502 cfa.reg != HARD_FRAME_POINTER_REGNUM
1505 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1506 constraints: fde->stack_realign == 1
1508 && cfa.indirect == 0
1509 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1510 effects: Use DW_CFA_def_cfa_expression to define cfa
1511 cfa.reg == fde->drap_reg */
1514 dwarf2out_frame_debug_expr (rtx expr
)
1516 rtx src
, dest
, span
;
1517 HOST_WIDE_INT offset
;
1520 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1521 the PARALLEL independently. The first element is always processed if
1522 it is a SET. This is for backward compatibility. Other elements
1523 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1524 flag is set in them. */
1525 if (GET_CODE (expr
) == PARALLEL
|| GET_CODE (expr
) == SEQUENCE
)
1528 int limit
= XVECLEN (expr
, 0);
1531 /* PARALLELs have strict read-modify-write semantics, so we
1532 ought to evaluate every rvalue before changing any lvalue.
1533 It's cumbersome to do that in general, but there's an
1534 easy approximation that is enough for all current users:
1535 handle register saves before register assignments. */
1536 if (GET_CODE (expr
) == PARALLEL
)
1537 for (par_index
= 0; par_index
< limit
; par_index
++)
1539 elem
= XVECEXP (expr
, 0, par_index
);
1540 if (GET_CODE (elem
) == SET
1541 && MEM_P (SET_DEST (elem
))
1542 && (RTX_FRAME_RELATED_P (elem
) || par_index
== 0))
1543 dwarf2out_frame_debug_expr (elem
);
1546 for (par_index
= 0; par_index
< limit
; par_index
++)
1548 elem
= XVECEXP (expr
, 0, par_index
);
1549 if (GET_CODE (elem
) == SET
1550 && (!MEM_P (SET_DEST (elem
)) || GET_CODE (expr
) == SEQUENCE
)
1551 && (RTX_FRAME_RELATED_P (elem
) || par_index
== 0))
1552 dwarf2out_frame_debug_expr (elem
);
1557 gcc_assert (GET_CODE (expr
) == SET
);
1559 src
= SET_SRC (expr
);
1560 dest
= SET_DEST (expr
);
1564 rtx rsi
= reg_saved_in (src
);
1571 switch (GET_CODE (dest
))
1574 switch (GET_CODE (src
))
1576 /* Setting FP from SP. */
1578 if (cur_cfa
->reg
== dwf_regno (src
))
1581 /* Update the CFA rule wrt SP or FP. Make sure src is
1582 relative to the current CFA register.
1584 We used to require that dest be either SP or FP, but the
1585 ARM copies SP to a temporary register, and from there to
1586 FP. So we just rely on the backends to only set
1587 RTX_FRAME_RELATED_P on appropriate insns. */
1588 cur_cfa
->reg
= dwf_regno (dest
);
1589 cur_trace
->cfa_temp
.reg
= cur_cfa
->reg
;
1590 cur_trace
->cfa_temp
.offset
= cur_cfa
->offset
;
1594 /* Saving a register in a register. */
1595 gcc_assert (!fixed_regs
[REGNO (dest
)]
1596 /* For the SPARC and its register window. */
1597 || (dwf_regno (src
) == DWARF_FRAME_RETURN_COLUMN
));
1599 /* After stack is aligned, we can only save SP in FP
1600 if drap register is used. In this case, we have
1601 to restore stack pointer with the CFA value and we
1602 don't generate this DWARF information. */
1604 && fde
->stack_realign
1605 && REGNO (src
) == STACK_POINTER_REGNUM
)
1606 gcc_assert (REGNO (dest
) == HARD_FRAME_POINTER_REGNUM
1607 && fde
->drap_reg
!= INVALID_REGNUM
1608 && cur_cfa
->reg
!= dwf_regno (src
));
1610 queue_reg_save (src
, dest
, 0);
1617 if (dest
== stack_pointer_rtx
)
1621 switch (GET_CODE (XEXP (src
, 1)))
1624 offset
= INTVAL (XEXP (src
, 1));
1627 gcc_assert (dwf_regno (XEXP (src
, 1))
1628 == cur_trace
->cfa_temp
.reg
);
1629 offset
= cur_trace
->cfa_temp
.offset
;
1635 if (XEXP (src
, 0) == hard_frame_pointer_rtx
)
1637 /* Restoring SP from FP in the epilogue. */
1638 gcc_assert (cur_cfa
->reg
== dw_frame_pointer_regnum
);
1639 cur_cfa
->reg
= dw_stack_pointer_regnum
;
1641 else if (GET_CODE (src
) == LO_SUM
)
1642 /* Assume we've set the source reg of the LO_SUM from sp. */
1645 gcc_assert (XEXP (src
, 0) == stack_pointer_rtx
);
1647 if (GET_CODE (src
) != MINUS
)
1649 if (cur_cfa
->reg
== dw_stack_pointer_regnum
)
1650 cur_cfa
->offset
+= offset
;
1651 if (cur_trace
->cfa_store
.reg
== dw_stack_pointer_regnum
)
1652 cur_trace
->cfa_store
.offset
+= offset
;
1654 else if (dest
== hard_frame_pointer_rtx
)
1657 /* Either setting the FP from an offset of the SP,
1658 or adjusting the FP */
1659 gcc_assert (frame_pointer_needed
);
1661 gcc_assert (REG_P (XEXP (src
, 0))
1662 && dwf_regno (XEXP (src
, 0)) == cur_cfa
->reg
1663 && CONST_INT_P (XEXP (src
, 1)));
1664 offset
= INTVAL (XEXP (src
, 1));
1665 if (GET_CODE (src
) != MINUS
)
1667 cur_cfa
->offset
+= offset
;
1668 cur_cfa
->reg
= dw_frame_pointer_regnum
;
1672 gcc_assert (GET_CODE (src
) != MINUS
);
1675 if (REG_P (XEXP (src
, 0))
1676 && dwf_regno (XEXP (src
, 0)) == cur_cfa
->reg
1677 && CONST_INT_P (XEXP (src
, 1)))
1679 /* Setting a temporary CFA register that will be copied
1680 into the FP later on. */
1681 offset
= - INTVAL (XEXP (src
, 1));
1682 cur_cfa
->offset
+= offset
;
1683 cur_cfa
->reg
= dwf_regno (dest
);
1684 /* Or used to save regs to the stack. */
1685 cur_trace
->cfa_temp
.reg
= cur_cfa
->reg
;
1686 cur_trace
->cfa_temp
.offset
= cur_cfa
->offset
;
1690 else if (REG_P (XEXP (src
, 0))
1691 && dwf_regno (XEXP (src
, 0)) == cur_trace
->cfa_temp
.reg
1692 && XEXP (src
, 1) == stack_pointer_rtx
)
1694 /* Setting a scratch register that we will use instead
1695 of SP for saving registers to the stack. */
1696 gcc_assert (cur_cfa
->reg
== dw_stack_pointer_regnum
);
1697 cur_trace
->cfa_store
.reg
= dwf_regno (dest
);
1698 cur_trace
->cfa_store
.offset
1699 = cur_cfa
->offset
- cur_trace
->cfa_temp
.offset
;
1703 else if (GET_CODE (src
) == LO_SUM
1704 && CONST_INT_P (XEXP (src
, 1)))
1706 cur_trace
->cfa_temp
.reg
= dwf_regno (dest
);
1707 cur_trace
->cfa_temp
.offset
= INTVAL (XEXP (src
, 1));
1716 cur_trace
->cfa_temp
.reg
= dwf_regno (dest
);
1717 cur_trace
->cfa_temp
.offset
= INTVAL (src
);
1722 gcc_assert (REG_P (XEXP (src
, 0))
1723 && dwf_regno (XEXP (src
, 0)) == cur_trace
->cfa_temp
.reg
1724 && CONST_INT_P (XEXP (src
, 1)));
1726 cur_trace
->cfa_temp
.reg
= dwf_regno (dest
);
1727 cur_trace
->cfa_temp
.offset
|= INTVAL (XEXP (src
, 1));
1730 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1731 which will fill in all of the bits. */
1738 case UNSPEC_VOLATILE
:
1739 /* All unspecs should be represented by REG_CFA_* notes. */
1745 /* If this AND operation happens on stack pointer in prologue,
1746 we assume the stack is realigned and we extract the
1748 if (fde
&& XEXP (src
, 0) == stack_pointer_rtx
)
1750 /* We interpret reg_save differently with stack_realign set.
1751 Thus we must flush whatever we have queued first. */
1752 dwarf2out_flush_queued_reg_saves ();
1754 gcc_assert (cur_trace
->cfa_store
.reg
1755 == dwf_regno (XEXP (src
, 0)));
1756 fde
->stack_realign
= 1;
1757 fde
->stack_realignment
= INTVAL (XEXP (src
, 1));
1758 cur_trace
->cfa_store
.offset
= 0;
1760 if (cur_cfa
->reg
!= dw_stack_pointer_regnum
1761 && cur_cfa
->reg
!= dw_frame_pointer_regnum
)
1762 fde
->drap_reg
= cur_cfa
->reg
;
1773 /* Saving a register to the stack. Make sure dest is relative to the
1775 switch (GET_CODE (XEXP (dest
, 0)))
1781 /* We can't handle variable size modifications. */
1782 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest
, 0), 1), 1))
1784 offset
= -INTVAL (XEXP (XEXP (XEXP (dest
, 0), 1), 1));
1786 gcc_assert (REGNO (XEXP (XEXP (dest
, 0), 0)) == STACK_POINTER_REGNUM
1787 && cur_trace
->cfa_store
.reg
== dw_stack_pointer_regnum
);
1789 cur_trace
->cfa_store
.offset
+= offset
;
1790 if (cur_cfa
->reg
== dw_stack_pointer_regnum
)
1791 cur_cfa
->offset
= cur_trace
->cfa_store
.offset
;
1793 if (GET_CODE (XEXP (dest
, 0)) == POST_MODIFY
)
1794 offset
-= cur_trace
->cfa_store
.offset
;
1796 offset
= -cur_trace
->cfa_store
.offset
;
1803 offset
= GET_MODE_SIZE (GET_MODE (dest
));
1804 if (GET_CODE (XEXP (dest
, 0)) == PRE_INC
)
1807 gcc_assert ((REGNO (XEXP (XEXP (dest
, 0), 0))
1808 == STACK_POINTER_REGNUM
)
1809 && cur_trace
->cfa_store
.reg
== dw_stack_pointer_regnum
);
1811 cur_trace
->cfa_store
.offset
+= offset
;
1813 /* Rule 18: If stack is aligned, we will use FP as a
1814 reference to represent the address of the stored
1817 && fde
->stack_realign
1819 && REGNO (src
) == HARD_FRAME_POINTER_REGNUM
)
1821 gcc_assert (cur_cfa
->reg
!= dw_frame_pointer_regnum
);
1822 cur_trace
->cfa_store
.offset
= 0;
1825 if (cur_cfa
->reg
== dw_stack_pointer_regnum
)
1826 cur_cfa
->offset
= cur_trace
->cfa_store
.offset
;
1828 if (GET_CODE (XEXP (dest
, 0)) == POST_DEC
)
1829 offset
+= -cur_trace
->cfa_store
.offset
;
1831 offset
= -cur_trace
->cfa_store
.offset
;
1835 /* With an offset. */
1842 gcc_assert (CONST_INT_P (XEXP (XEXP (dest
, 0), 1))
1843 && REG_P (XEXP (XEXP (dest
, 0), 0)));
1844 offset
= INTVAL (XEXP (XEXP (dest
, 0), 1));
1845 if (GET_CODE (XEXP (dest
, 0)) == MINUS
)
1848 regno
= dwf_regno (XEXP (XEXP (dest
, 0), 0));
1850 if (cur_cfa
->reg
== regno
)
1851 offset
-= cur_cfa
->offset
;
1852 else if (cur_trace
->cfa_store
.reg
== regno
)
1853 offset
-= cur_trace
->cfa_store
.offset
;
1856 gcc_assert (cur_trace
->cfa_temp
.reg
== regno
);
1857 offset
-= cur_trace
->cfa_temp
.offset
;
1863 /* Without an offset. */
1866 unsigned int regno
= dwf_regno (XEXP (dest
, 0));
1868 if (cur_cfa
->reg
== regno
)
1869 offset
= -cur_cfa
->offset
;
1870 else if (cur_trace
->cfa_store
.reg
== regno
)
1871 offset
= -cur_trace
->cfa_store
.offset
;
1874 gcc_assert (cur_trace
->cfa_temp
.reg
== regno
);
1875 offset
= -cur_trace
->cfa_temp
.offset
;
1882 gcc_assert (cur_trace
->cfa_temp
.reg
1883 == dwf_regno (XEXP (XEXP (dest
, 0), 0)));
1884 offset
= -cur_trace
->cfa_temp
.offset
;
1885 cur_trace
->cfa_temp
.offset
-= GET_MODE_SIZE (GET_MODE (dest
));
1893 /* If the source operand of this MEM operation is a memory,
1894 we only care how much stack grew. */
1899 && REGNO (src
) != STACK_POINTER_REGNUM
1900 && REGNO (src
) != HARD_FRAME_POINTER_REGNUM
1901 && dwf_regno (src
) == cur_cfa
->reg
)
1903 /* We're storing the current CFA reg into the stack. */
1905 if (cur_cfa
->offset
== 0)
1908 /* If stack is aligned, putting CFA reg into stack means
1909 we can no longer use reg + offset to represent CFA.
1910 Here we use DW_CFA_def_cfa_expression instead. The
1911 result of this expression equals to the original CFA
1914 && fde
->stack_realign
1915 && cur_cfa
->indirect
== 0
1916 && cur_cfa
->reg
!= dw_frame_pointer_regnum
)
1918 gcc_assert (fde
->drap_reg
== cur_cfa
->reg
);
1920 cur_cfa
->indirect
= 1;
1921 cur_cfa
->reg
= dw_frame_pointer_regnum
;
1922 cur_cfa
->base_offset
= offset
;
1923 cur_cfa
->offset
= 0;
1925 fde
->drap_reg_saved
= 1;
1929 /* If the source register is exactly the CFA, assume
1930 we're saving SP like any other register; this happens
1932 queue_reg_save (stack_pointer_rtx
, NULL_RTX
, offset
);
1937 /* Otherwise, we'll need to look in the stack to
1938 calculate the CFA. */
1939 rtx x
= XEXP (dest
, 0);
1943 gcc_assert (REG_P (x
));
1945 cur_cfa
->reg
= dwf_regno (x
);
1946 cur_cfa
->base_offset
= offset
;
1947 cur_cfa
->indirect
= 1;
1953 span
= targetm
.dwarf_register_span (src
);
1958 queue_reg_save (src
, NULL_RTX
, offset
);
1961 /* We have a PARALLEL describing where the contents of SRC live.
1962 Queue register saves for each piece of the PARALLEL. */
1963 HOST_WIDE_INT span_offset
= offset
;
1965 gcc_assert (GET_CODE (span
) == PARALLEL
);
1967 const int par_len
= XVECLEN (span
, 0);
1968 for (int par_index
= 0; par_index
< par_len
; par_index
++)
1970 rtx elem
= XVECEXP (span
, 0, par_index
);
1971 queue_reg_save (elem
, NULL_RTX
, span_offset
);
1972 span_offset
+= GET_MODE_SIZE (GET_MODE (elem
));
1982 /* Record call frame debugging information for INSN, which either sets
1983 SP or FP (adjusting how we calculate the frame address) or saves a
1984 register to the stack. */
1987 dwarf2out_frame_debug (rtx_insn
*insn
)
1990 bool handled_one
= false;
1992 for (note
= REG_NOTES (insn
); note
; note
= XEXP (note
, 1))
1993 switch (REG_NOTE_KIND (note
))
1995 case REG_FRAME_RELATED_EXPR
:
1996 pat
= XEXP (note
, 0);
1999 case REG_CFA_DEF_CFA
:
2000 dwarf2out_frame_debug_def_cfa (XEXP (note
, 0));
2004 case REG_CFA_ADJUST_CFA
:
2009 if (GET_CODE (n
) == PARALLEL
)
2010 n
= XVECEXP (n
, 0, 0);
2012 dwarf2out_frame_debug_adjust_cfa (n
);
2016 case REG_CFA_OFFSET
:
2019 n
= single_set (insn
);
2020 dwarf2out_frame_debug_cfa_offset (n
);
2024 case REG_CFA_REGISTER
:
2029 if (GET_CODE (n
) == PARALLEL
)
2030 n
= XVECEXP (n
, 0, 0);
2032 dwarf2out_frame_debug_cfa_register (n
);
2036 case REG_CFA_EXPRESSION
:
2039 n
= single_set (insn
);
2040 dwarf2out_frame_debug_cfa_expression (n
);
2044 case REG_CFA_RESTORE
:
2049 if (GET_CODE (n
) == PARALLEL
)
2050 n
= XVECEXP (n
, 0, 0);
2053 dwarf2out_frame_debug_cfa_restore (n
);
2057 case REG_CFA_SET_VDRAP
:
2061 dw_fde_ref fde
= cfun
->fde
;
2064 gcc_assert (fde
->vdrap_reg
== INVALID_REGNUM
);
2066 fde
->vdrap_reg
= dwf_regno (n
);
2072 case REG_CFA_WINDOW_SAVE
:
2073 dwarf2out_frame_debug_cfa_window_save ();
2077 case REG_CFA_FLUSH_QUEUE
:
2078 /* The actual flush happens elsewhere. */
2088 pat
= PATTERN (insn
);
2090 dwarf2out_frame_debug_expr (pat
);
2092 /* Check again. A parallel can save and update the same register.
2093 We could probably check just once, here, but this is safer than
2094 removing the check at the start of the function. */
2095 if (clobbers_queued_reg_save (pat
))
2096 dwarf2out_flush_queued_reg_saves ();
2100 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2103 change_cfi_row (dw_cfi_row
*old_row
, dw_cfi_row
*new_row
)
2105 size_t i
, n_old
, n_new
, n_max
;
2108 if (new_row
->cfa_cfi
&& !cfi_equal_p (old_row
->cfa_cfi
, new_row
->cfa_cfi
))
2109 add_cfi (new_row
->cfa_cfi
);
2112 cfi
= def_cfa_0 (&old_row
->cfa
, &new_row
->cfa
);
2117 n_old
= vec_safe_length (old_row
->reg_save
);
2118 n_new
= vec_safe_length (new_row
->reg_save
);
2119 n_max
= MAX (n_old
, n_new
);
2121 for (i
= 0; i
< n_max
; ++i
)
2123 dw_cfi_ref r_old
= NULL
, r_new
= NULL
;
2126 r_old
= (*old_row
->reg_save
)[i
];
2128 r_new
= (*new_row
->reg_save
)[i
];
2132 else if (r_new
== NULL
)
2133 add_cfi_restore (i
);
2134 else if (!cfi_equal_p (r_old
, r_new
))
2139 /* Examine CFI and return true if a cfi label and set_loc is needed
2140 beforehand. Even when generating CFI assembler instructions, we
2141 still have to add the cfi to the list so that lookup_cfa_1 works
2142 later on. When -g2 and above we even need to force emitting of
2143 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2144 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2145 and so don't use convert_cfa_to_fb_loc_list. */
2148 cfi_label_required_p (dw_cfi_ref cfi
)
2150 if (!dwarf2out_do_cfi_asm ())
2153 if (dwarf_version
== 2
2154 && debug_info_level
> DINFO_LEVEL_TERSE
2155 && (write_symbols
== DWARF2_DEBUG
2156 || write_symbols
== VMS_AND_DWARF2_DEBUG
))
2158 switch (cfi
->dw_cfi_opc
)
2160 case DW_CFA_def_cfa_offset
:
2161 case DW_CFA_def_cfa_offset_sf
:
2162 case DW_CFA_def_cfa_register
:
2163 case DW_CFA_def_cfa
:
2164 case DW_CFA_def_cfa_sf
:
2165 case DW_CFA_def_cfa_expression
:
2166 case DW_CFA_restore_state
:
2175 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2176 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2179 add_cfis_to_fde (void)
2181 dw_fde_ref fde
= cfun
->fde
;
2182 rtx_insn
*insn
, *next
;
2183 /* We always start with a function_begin label. */
2186 for (insn
= get_insns (); insn
; insn
= next
)
2188 next
= NEXT_INSN (insn
);
2190 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_SWITCH_TEXT_SECTIONS
)
2192 fde
->dw_fde_switch_cfi_index
= vec_safe_length (fde
->dw_fde_cfi
);
2193 /* Don't attempt to advance_loc4 between labels
2194 in different sections. */
2198 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_CFI
)
2200 bool required
= cfi_label_required_p (NOTE_CFI (insn
));
2202 if (NOTE_P (next
) && NOTE_KIND (next
) == NOTE_INSN_CFI
)
2204 required
|= cfi_label_required_p (NOTE_CFI (next
));
2205 next
= NEXT_INSN (next
);
2207 else if (active_insn_p (next
)
2208 || (NOTE_P (next
) && (NOTE_KIND (next
)
2209 == NOTE_INSN_SWITCH_TEXT_SECTIONS
)))
2212 next
= NEXT_INSN (next
);
2215 int num
= dwarf2out_cfi_label_num
;
2216 const char *label
= dwarf2out_cfi_label ();
2219 /* Set the location counter to the new label. */
2221 xcfi
->dw_cfi_opc
= (first
? DW_CFA_set_loc
2222 : DW_CFA_advance_loc4
);
2223 xcfi
->dw_cfi_oprnd1
.dw_cfi_addr
= label
;
2224 vec_safe_push (fde
->dw_fde_cfi
, xcfi
);
2226 rtx_note
*tmp
= emit_note_before (NOTE_INSN_CFI_LABEL
, insn
);
2227 NOTE_LABEL_NUMBER (tmp
) = num
;
2232 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_CFI
)
2233 vec_safe_push (fde
->dw_fde_cfi
, NOTE_CFI (insn
));
2234 insn
= NEXT_INSN (insn
);
2236 while (insn
!= next
);
2242 /* If LABEL is the start of a trace, then initialize the state of that
2243 trace from CUR_TRACE and CUR_ROW. */
2246 maybe_record_trace_start (rtx_insn
*start
, rtx_insn
*origin
)
2249 HOST_WIDE_INT args_size
;
2251 ti
= get_trace_info (start
);
2252 gcc_assert (ti
!= NULL
);
2256 fprintf (dump_file
, " saw edge from trace %u to %u (via %s %d)\n",
2257 cur_trace
->id
, ti
->id
,
2258 (origin
? rtx_name
[(int) GET_CODE (origin
)] : "fallthru"),
2259 (origin
? INSN_UID (origin
) : 0));
2262 args_size
= cur_trace
->end_true_args_size
;
2263 if (ti
->beg_row
== NULL
)
2265 /* This is the first time we've encountered this trace. Propagate
2266 state across the edge and push the trace onto the work list. */
2267 ti
->beg_row
= copy_cfi_row (cur_row
);
2268 ti
->beg_true_args_size
= args_size
;
2270 ti
->cfa_store
= cur_trace
->cfa_store
;
2271 ti
->cfa_temp
= cur_trace
->cfa_temp
;
2272 ti
->regs_saved_in_regs
= cur_trace
->regs_saved_in_regs
.copy ();
2274 trace_work_list
.safe_push (ti
);
2277 fprintf (dump_file
, "\tpush trace %u to worklist\n", ti
->id
);
2282 /* We ought to have the same state incoming to a given trace no
2283 matter how we arrive at the trace. Anything else means we've
2284 got some kind of optimization error. */
2285 gcc_checking_assert (cfi_row_equal_p (cur_row
, ti
->beg_row
));
2287 /* The args_size is allowed to conflict if it isn't actually used. */
2288 if (ti
->beg_true_args_size
!= args_size
)
2289 ti
->args_size_undefined
= true;
2293 /* Similarly, but handle the args_size and CFA reset across EH
2294 and non-local goto edges. */
2297 maybe_record_trace_start_abnormal (rtx_insn
*start
, rtx_insn
*origin
)
2299 HOST_WIDE_INT save_args_size
, delta
;
2300 dw_cfa_location save_cfa
;
2302 save_args_size
= cur_trace
->end_true_args_size
;
2303 if (save_args_size
== 0)
2305 maybe_record_trace_start (start
, origin
);
2309 delta
= -save_args_size
;
2310 cur_trace
->end_true_args_size
= 0;
2312 save_cfa
= cur_row
->cfa
;
2313 if (cur_row
->cfa
.reg
== dw_stack_pointer_regnum
)
2315 /* Convert a change in args_size (always a positive in the
2316 direction of stack growth) to a change in stack pointer. */
2317 if (!STACK_GROWS_DOWNWARD
)
2320 cur_row
->cfa
.offset
+= delta
;
2323 maybe_record_trace_start (start
, origin
);
2325 cur_trace
->end_true_args_size
= save_args_size
;
2326 cur_row
->cfa
= save_cfa
;
2329 /* Propagate CUR_TRACE state to the destinations implied by INSN. */
2330 /* ??? Sadly, this is in large part a duplicate of make_edges. */
2333 create_trace_edges (rtx_insn
*insn
)
2340 rtx_jump_table_data
*table
;
2342 if (find_reg_note (insn
, REG_NON_LOCAL_GOTO
, NULL_RTX
))
2345 if (tablejump_p (insn
, NULL
, &table
))
2347 rtvec vec
= table
->get_labels ();
2349 n
= GET_NUM_ELEM (vec
);
2350 for (i
= 0; i
< n
; ++i
)
2352 rtx_insn
*lab
= as_a
<rtx_insn
*> (XEXP (RTVEC_ELT (vec
, i
), 0));
2353 maybe_record_trace_start (lab
, insn
);
2356 else if (computed_jump_p (insn
))
2360 FOR_EACH_VEC_SAFE_ELT (forced_labels
, i
, temp
)
2361 maybe_record_trace_start (temp
, insn
);
2363 else if (returnjump_p (insn
))
2365 else if ((tmp
= extract_asm_operands (PATTERN (insn
))) != NULL
)
2367 n
= ASM_OPERANDS_LABEL_LENGTH (tmp
);
2368 for (i
= 0; i
< n
; ++i
)
2371 as_a
<rtx_insn
*> (XEXP (ASM_OPERANDS_LABEL (tmp
, i
), 0));
2372 maybe_record_trace_start (lab
, insn
);
2377 rtx_insn
*lab
= JUMP_LABEL_AS_INSN (insn
);
2378 gcc_assert (lab
!= NULL
);
2379 maybe_record_trace_start (lab
, insn
);
2382 else if (CALL_P (insn
))
2384 /* Sibling calls don't have edges inside this function. */
2385 if (SIBLING_CALL_P (insn
))
2388 /* Process non-local goto edges. */
2389 if (can_nonlocal_goto (insn
))
2390 for (rtx_insn_list
*lab
= nonlocal_goto_handler_labels
;
2393 maybe_record_trace_start_abnormal (lab
->insn (), insn
);
2395 else if (rtx_sequence
*seq
= dyn_cast
<rtx_sequence
*> (PATTERN (insn
)))
2397 int i
, n
= seq
->len ();
2398 for (i
= 0; i
< n
; ++i
)
2399 create_trace_edges (seq
->insn (i
));
2403 /* Process EH edges. */
2404 if (CALL_P (insn
) || cfun
->can_throw_non_call_exceptions
)
2406 eh_landing_pad lp
= get_eh_landing_pad_from_rtx (insn
);
2408 maybe_record_trace_start_abnormal (lp
->landing_pad
, insn
);
2412 /* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
2415 scan_insn_after (rtx_insn
*insn
)
2417 if (RTX_FRAME_RELATED_P (insn
))
2418 dwarf2out_frame_debug (insn
);
2419 notice_args_size (insn
);
2422 /* Scan the trace beginning at INSN and create the CFI notes for the
2423 instructions therein. */
2426 scan_trace (dw_trace_info
*trace
)
2428 rtx_insn
*prev
, *insn
= trace
->head
;
2429 dw_cfa_location this_cfa
;
2432 fprintf (dump_file
, "Processing trace %u : start at %s %d\n",
2433 trace
->id
, rtx_name
[(int) GET_CODE (insn
)],
2436 trace
->end_row
= copy_cfi_row (trace
->beg_row
);
2437 trace
->end_true_args_size
= trace
->beg_true_args_size
;
2440 cur_row
= trace
->end_row
;
2442 this_cfa
= cur_row
->cfa
;
2443 cur_cfa
= &this_cfa
;
2445 for (prev
= insn
, insn
= NEXT_INSN (insn
);
2447 prev
= insn
, insn
= NEXT_INSN (insn
))
2451 /* Do everything that happens "before" the insn. */
2452 add_cfi_insn
= prev
;
2454 /* Notice the end of a trace. */
2455 if (BARRIER_P (insn
))
2457 /* Don't bother saving the unneeded queued registers at all. */
2458 queued_reg_saves
.truncate (0);
2461 if (save_point_p (insn
))
2463 /* Propagate across fallthru edges. */
2464 dwarf2out_flush_queued_reg_saves ();
2465 maybe_record_trace_start (insn
, NULL
);
2469 if (DEBUG_INSN_P (insn
) || !inside_basic_block_p (insn
))
2472 /* Handle all changes to the row state. Sequences require special
2473 handling for the positioning of the notes. */
2474 if (rtx_sequence
*pat
= dyn_cast
<rtx_sequence
*> (PATTERN (insn
)))
2477 int i
, n
= pat
->len ();
2479 control
= pat
->insn (0);
2480 if (can_throw_internal (control
))
2481 notice_eh_throw (control
);
2482 dwarf2out_flush_queued_reg_saves ();
2484 if (JUMP_P (control
) && INSN_ANNULLED_BRANCH_P (control
))
2486 /* ??? Hopefully multiple delay slots are not annulled. */
2487 gcc_assert (n
== 2);
2488 gcc_assert (!RTX_FRAME_RELATED_P (control
));
2489 gcc_assert (!find_reg_note (control
, REG_ARGS_SIZE
, NULL
));
2491 elt
= pat
->insn (1);
2493 if (INSN_FROM_TARGET_P (elt
))
2495 HOST_WIDE_INT restore_args_size
;
2496 cfi_vec save_row_reg_save
;
2498 /* If ELT is an instruction from target of an annulled
2499 branch, the effects are for the target only and so
2500 the args_size and CFA along the current path
2501 shouldn't change. */
2502 add_cfi_insn
= NULL
;
2503 restore_args_size
= cur_trace
->end_true_args_size
;
2504 cur_cfa
= &cur_row
->cfa
;
2505 save_row_reg_save
= vec_safe_copy (cur_row
->reg_save
);
2507 scan_insn_after (elt
);
2509 /* ??? Should we instead save the entire row state? */
2510 gcc_assert (!queued_reg_saves
.length ());
2512 create_trace_edges (control
);
2514 cur_trace
->end_true_args_size
= restore_args_size
;
2515 cur_row
->cfa
= this_cfa
;
2516 cur_row
->reg_save
= save_row_reg_save
;
2517 cur_cfa
= &this_cfa
;
2521 /* If ELT is a annulled branch-taken instruction (i.e.
2522 executed only when branch is not taken), the args_size
2523 and CFA should not change through the jump. */
2524 create_trace_edges (control
);
2526 /* Update and continue with the trace. */
2527 add_cfi_insn
= insn
;
2528 scan_insn_after (elt
);
2529 def_cfa_1 (&this_cfa
);
2534 /* The insns in the delay slot should all be considered to happen
2535 "before" a call insn. Consider a call with a stack pointer
2536 adjustment in the delay slot. The backtrace from the callee
2537 should include the sp adjustment. Unfortunately, that leaves
2538 us with an unavoidable unwinding error exactly at the call insn
2539 itself. For jump insns we'd prefer to avoid this error by
2540 placing the notes after the sequence. */
2541 if (JUMP_P (control
))
2542 add_cfi_insn
= insn
;
2544 for (i
= 1; i
< n
; ++i
)
2546 elt
= pat
->insn (i
);
2547 scan_insn_after (elt
);
2550 /* Make sure any register saves are visible at the jump target. */
2551 dwarf2out_flush_queued_reg_saves ();
2552 any_cfis_emitted
= false;
2554 /* However, if there is some adjustment on the call itself, e.g.
2555 a call_pop, that action should be considered to happen after
2556 the call returns. */
2557 add_cfi_insn
= insn
;
2558 scan_insn_after (control
);
2562 /* Flush data before calls and jumps, and of course if necessary. */
2563 if (can_throw_internal (insn
))
2565 notice_eh_throw (insn
);
2566 dwarf2out_flush_queued_reg_saves ();
2568 else if (!NONJUMP_INSN_P (insn
)
2569 || clobbers_queued_reg_save (insn
)
2570 || find_reg_note (insn
, REG_CFA_FLUSH_QUEUE
, NULL
))
2571 dwarf2out_flush_queued_reg_saves ();
2572 any_cfis_emitted
= false;
2574 add_cfi_insn
= insn
;
2575 scan_insn_after (insn
);
2579 /* Between frame-related-p and args_size we might have otherwise
2580 emitted two cfa adjustments. Do it now. */
2581 def_cfa_1 (&this_cfa
);
2583 /* Minimize the number of advances by emitting the entire queue
2584 once anything is emitted. */
2585 if (any_cfis_emitted
2586 || find_reg_note (insn
, REG_CFA_FLUSH_QUEUE
, NULL
))
2587 dwarf2out_flush_queued_reg_saves ();
2589 /* Note that a test for control_flow_insn_p does exactly the
2590 same tests as are done to actually create the edges. So
2591 always call the routine and let it not create edges for
2592 non-control-flow insns. */
2593 create_trace_edges (control
);
2596 add_cfi_insn
= NULL
;
2602 /* Scan the function and create the initial set of CFI notes. */
/* NOTE(review): this capture is lossy -- the return type, braces, the
   local declaration of TI and the calls that actually scan each trace
   are missing; visible tokens are kept verbatim.  */
 2605 create_cfi_notes (void)
/* Both worklists must be empty on entry; traces own their own queues.  */
 2609 gcc_checking_assert (!queued_reg_saves
.exists ());
 2610 gcc_checking_assert (!trace_work_list
.exists ());
 2612 /* Always begin at the entry trace.  */
 2613 ti
= &trace_info
[0];
/* Drain the worklist: traces discovered while scanning are pushed onto
   trace_work_list and popped here until none remain.  */
 2616 while (!trace_work_list
.is_empty ())
 2618 ti
= trace_work_list
.pop ();
/* Release the per-function scratch vectors once all traces are done.  */
 2622 queued_reg_saves
.release ();
 2623 trace_work_list
.release ();
2626 /* Return the insn before the first NOTE_INSN_CFI after START. */
/* NOTE(review): loop structure and the return statement are missing from
   this capture; PREV presumably tracks the last insn examined before the
   CFI note is found -- confirm against the full source.  */
 2629 before_next_cfi_note (rtx_insn
*start
)
 2631 rtx_insn
*prev
= start
;
/* Walk forward until a NOTE_INSN_CFI is seen.  */
 2634 if (NOTE_P (start
) && NOTE_KIND (start
) == NOTE_INSN_CFI
)
 2637 start
= NEXT_INSN (start
);
2642 /* Insert CFI notes between traces to properly change state between them. */
/* NOTE(review): lossy capture -- braces, some declarations (cfi, note) and
   several statements are not visible; tokens below are kept verbatim.  */
 2645 connect_traces (void)
 2647 unsigned i
, n
= trace_info
.length ();
 2648 dw_trace_info
*prev_ti
, *ti
;
 2650 /* ??? Ideally, we should have both queued and processed every trace.
 2651 However the current representation of constant pools on various targets
 2652 is indistinguishable from unreachable code. Assume for the moment that
 2653 we can simply skip over such traces. */
 2654 /* ??? Consider creating a DATA_INSN rtx code to indicate that
 2655 these are not "real" instructions, and should not be considered.
 2656 This could be generically useful for tablejump data as well. */
 2657 /* Remove all unprocessed traces from the list. */
/* A trace with no beg_row was never scanned; drop it (index 0, the entry
   trace, is always kept).  */
 2658 for (i
= n
- 1; i
> 0; --i
)
 2660 ti
= &trace_info
[i
];
 2661 if (ti
->beg_row
== NULL
)
 2663 trace_info
.ordered_remove (i
);
/* Every surviving trace must have been fully scanned.  */
 2667 gcc_assert (ti
->end_row
!= NULL
);
 2670 /* Work from the end back to the beginning. This lets us easily insert
 2671 remember/restore_state notes in the correct order wrt other notes. */
 2672 prev_ti
= &trace_info
[n
- 1];
 2673 for (i
= n
- 1; i
> 0; --i
)
 2675 dw_cfi_row
*old_row
;
 2678 prev_ti
= &trace_info
[i
- 1];
 2680 add_cfi_insn
= ti
->head
;
 2682 /* In dwarf2out_switch_text_section, we'll begin a new FDE
 2683 for the portion of the function in the alternate text
 2684 section. The row state at the very beginning of that
 2685 new FDE will be exactly the row state from the CIE. */
 2686 if (ti
->switch_sections
)
 2687 old_row
= cie_cfi_row
;
 2690 old_row
= prev_ti
->end_row
;
 2691 /* If there's no change from the previous end state, fine. */
 2692 if (cfi_row_equal_p (old_row
, ti
->beg_row
))
 2694 /* Otherwise check for the common case of sharing state with
 2695 the beginning of an epilogue, but not the end. Insert
 2696 remember/restore opcodes in that case. */
 2697 else if (cfi_row_equal_p (prev_ti
->beg_row
, ti
->beg_row
))
 2701 /* Note that if we blindly insert the remember at the
 2702 start of the trace, we can wind up increasing the
 2703 size of the unwind info due to extra advance opcodes.
 2704 Instead, put the remember immediately before the next
 2705 state change. We know there must be one, because the
 2706 state at the beginning and head of the trace differ. */
 2707 add_cfi_insn
= before_next_cfi_note (prev_ti
->head
);
 2709 cfi
->dw_cfi_opc
= DW_CFA_remember_state
;
 2712 add_cfi_insn
= ti
->head
;
 2714 cfi
->dw_cfi_opc
= DW_CFA_restore_state
;
 2717 old_row
= prev_ti
->beg_row
;
 2719 /* Otherwise, we'll simply change state from the previous end. */
 2722 change_cfi_row (old_row
, ti
->beg_row
);
/* Dump any fixup CFI notes we just inserted between the traces.  */
 2724 if (dump_file
&& add_cfi_insn
!= ti
->head
)
 2728 fprintf (dump_file
, "Fixup between trace %u and %u:\n",
 2729 prev_ti
->id
, ti
->id
);
 2734 note
= NEXT_INSN (note
);
 2735 gcc_assert (NOTE_P (note
) && NOTE_KIND (note
) == NOTE_INSN_CFI
);
 2736 output_cfi_directive (dump_file
, NOTE_CFI (note
));
 2738 while (note
!= add_cfi_insn
);
 2742 /* Connect args_size between traces that have can_throw_internal insns. */
 2743 if (cfun
->eh
->lp_array
)
 2745 HOST_WIDE_INT prev_args_size
= 0;
 2747 for (i
= 0; i
< n
; ++i
)
 2749 ti
= &trace_info
[i
];
/* Section switches and traces with no EH insns are skipped (the capture
   omits the continue/break statements here).  */
 2751 if (ti
->switch_sections
)
 2753 if (ti
->eh_head
== NULL
)
 2755 gcc_assert (!ti
->args_size_undefined
);
 2757 if (ti
->beg_delay_args_size
!= prev_args_size
)
 2759 /* ??? Search back to previous CFI note. */
 2760 add_cfi_insn
= PREV_INSN (ti
->eh_head
);
 2761 add_cfi_args_size (ti
->beg_delay_args_size
);
 2764 prev_args_size
= ti
->end_delay_args_size
;
2769 /* Set up the pseudo-cfg of instruction traces, as described at the
 2770 block comment at the top of the file. */
/* NOTE(review): lossy capture -- braces and the declarations of TI, INSN,
   I and TP are missing; visible tokens are kept verbatim.  */
 2773 create_pseudo_cfg (void)
 2775 bool saw_barrier
, switch_sections
;
 2780 /* The first trace begins at the start of the function,
 2781 and begins with the CIE row state. */
 2782 trace_info
.create (16);
 2783 memset (&ti
, 0, sizeof (ti
));
 2784 ti
.head
= get_insns ();
 2785 ti
.beg_row
= cie_cfi_row
;
 2786 ti
.cfa_store
= cie_cfi_row
->cfa
;
 2787 ti
.cfa_temp
.reg
= INVALID_REGNUM
;
 2788 trace_info
.quick_push (ti
);
/* Re-seed the saved-return-column entry recorded by create_cie_data.  */
 2790 if (cie_return_save
)
 2791 ti
.regs_saved_in_regs
.safe_push (*cie_return_save
)
 2793 /* Walk all the insns, collecting start of trace locations. */
 2794 saw_barrier
= false;
 2795 switch_sections
= false;
 2796 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
 2798 if (BARRIER_P (insn
))
 2800 else if (NOTE_P (insn
)
 2801 && NOTE_KIND (insn
) == NOTE_INSN_SWITCH_TEXT_SECTIONS
)
 2803 /* We should have just seen a barrier. */
 2804 gcc_assert (saw_barrier
);
 2805 switch_sections
= true;
 2807 /* Watch out for save_point notes between basic blocks.
 2808 In particular, a note after a barrier. Do not record these,
 2809 delaying trace creation until the label. */
 2810 else if (save_point_p (insn
)
 2811 && (LABEL_P (insn
) || !saw_barrier
))
 2813 memset (&ti
, 0, sizeof (ti
));
 2815 ti
.switch_sections
= switch_sections
;
 2816 ti
.id
= trace_info
.length ();
 2817 trace_info
.safe_push (ti
);
/* Reset the barrier/section-switch trackers once a trace is recorded.  */
 2819 saw_barrier
= false;
 2820 switch_sections
= false;
 2824 /* Create the trace index after we've finished building trace_info,
 2825 avoiding stale pointer problems due to reallocation. */
 2827 = new hash_table
<trace_info_hasher
> (trace_info
.length ());
 2829 FOR_EACH_VEC_ELT (trace_info
, i
, tp
)
 2831 dw_trace_info
**slot
;
 2834 fprintf (dump_file
, "Creating trace %u : start at %s %d%s\n", tp
->id
,
 2835 rtx_name
[(int) GET_CODE (tp
->head
)], INSN_UID (tp
->head
),
 2836 tp
->switch_sections
? " (section switch)" : "");
/* Each trace is indexed by the UID of its head insn; duplicates would
   indicate two traces starting at the same insn.  */
 2838 slot
= trace_index
->find_slot_with_hash (tp
, INSN_UID (tp
->head
), INSERT
);
 2839 gcc_assert (*slot
== NULL
);
2844 /* Record the initial position of the return address. RTL is
 2845 INCOMING_RETURN_ADDR_RTX. */
/* NOTE(review): lossy capture -- the case labels (REG/MEM/PLUS/MINUS),
   break statements and braces of both switches are missing; visible
   tokens are kept verbatim.  */
 2848 initial_return_save (rtx rtl
)
 2850 unsigned int reg
= INVALID_REGNUM
;
 2851 HOST_WIDE_INT offset
= 0;
 2853 switch (GET_CODE (rtl
))
 2856 /* RA is in a register. */
 2857 reg
= dwf_regno (rtl
);
 2861 /* RA is on the stack. */
 2862 rtl
= XEXP (rtl
, 0);
 2863 switch (GET_CODE (rtl
))
/* Plain (mem (reg sp)): return address at offset 0 from SP.  */
 2866 gcc_assert (REGNO (rtl
) == STACK_POINTER_REGNUM
);
/* (mem (plus sp const)): positive offset from SP.  */
 2871 gcc_assert (REGNO (XEXP (rtl
, 0)) == STACK_POINTER_REGNUM
);
 2872 offset
= INTVAL (XEXP (rtl
, 1));
/* (mem (minus sp const)): negated offset from SP.  */
 2876 gcc_assert (REGNO (XEXP (rtl
, 0)) == STACK_POINTER_REGNUM
);
 2877 offset
= -INTVAL (XEXP (rtl
, 1));
 2887 /* The return address is at some offset from any value we can
 2888 actually load. For instance, on the SPARC it is in %i7+8. Just
 2889 ignore the offset for now; it doesn't matter for unwinding frames. */
 2890 gcc_assert (CONST_INT_P (XEXP (rtl
, 1)));
/* Recurse on the base of the (plus base const) expression.  */
 2891 initial_return_save (XEXP (rtl
, 0));
/* Record the save unless the RA already lives in the return column.  */
 2898 if (reg
!= DWARF_FRAME_RETURN_COLUMN
)
 2900 if (reg
!= INVALID_REGNUM
)
 2901 record_reg_saved_in_reg (rtl
, pc_rtx
);
 2902 reg_save (DWARF_FRAME_RETURN_COLUMN
, reg
, offset
- cur_row
->cfa
.offset
);
/* Build the shared CIE row state that every function's FDE starts from.
   NOTE(review): lossy capture -- return type, braces, the switch's case
   labels and the final cleanup are missing; tokens kept verbatim.  */
 2907 create_cie_data (void)
 2909 dw_cfa_location loc
;
 2910 dw_trace_info cie_trace
;
 2912 dw_stack_pointer_regnum
= DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM
);
/* Scan the CIE setup under a dummy trace so helpers that consult
   cur_trace work here too.  */
 2914 memset (&cie_trace
, 0, sizeof (cie_trace
));
 2915 cur_trace
= &cie_trace
;
 2917 add_cfi_vec
= &cie_cfi_vec
;
 2918 cie_cfi_row
= cur_row
= new_cfi_row ();
 2920 /* On entry, the Canonical Frame Address is at SP. */
 2921 memset (&loc
, 0, sizeof (loc
));
 2922 loc
.reg
= dw_stack_pointer_regnum
;
 2923 loc
.offset
= INCOMING_FRAME_SP_OFFSET
;
/* Only record the incoming return address when DWARF2 unwind info is
   actually wanted for debug or exceptions.  */
 2926 if (targetm
.debug_unwind_info () == UI_DWARF2
 2927 || targetm_common
.except_unwind_info (&global_options
) == UI_DWARF2
)
 2929 initial_return_save (INCOMING_RETURN_ADDR_RTX
);
 2931 /* For a few targets, we have the return address incoming into a
 2932 register, but choose a different return column. This will result
 2933 in a DW_CFA_register for the return, and an entry in
 2934 regs_saved_in_regs to match. If the target later stores that
 2935 return address register to the stack, we want to be able to emit
 2936 the DW_CFA_offset against the return column, not the intermediate
 2937 save register. Save the contents of regs_saved_in_regs so that
 2938 we can re-initialize it at the start of each function. */
 2939 switch (cie_trace
.regs_saved_in_regs
.length ())
/* One recorded entry: stash it in GC memory for reuse per function.  */
 2944 cie_return_save
= ggc_alloc
<reg_saved_in_data
> ();
 2945 *cie_return_save
= cie_trace
.regs_saved_in_regs
[0];
 2946 cie_trace
.regs_saved_in_regs
.release ();
2958 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
 2959 state at each location within the function. These notes will be
 2960 emitted during pass_final. */
/* NOTE(review): lossy capture -- the call to create_cie_data, the call to
   connect_traces, local declarations and the return value are missing;
   tokens kept verbatim.  */
 2963 execute_dwarf2_frame (void)
 2965 /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file. */
 2966 dw_frame_pointer_regnum
= DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM
);
 2968 /* The first time we're called, compute the incoming frame state. */
 2969 if (cie_cfi_vec
== NULL
)
 2972 dwarf2out_alloc_current_fde ();
/* Build the trace pseudo-cfg, then walk it producing CFI notes.  */
 2974 create_pseudo_cfg ();
 2977 create_cfi_notes ();
 2981 /* Free all the data we allocated. */
 2986 FOR_EACH_VEC_ELT (trace_info
, i
, ti
)
 2987 ti
->regs_saved_in_regs
.release ();
 2989 trace_info
.release ();
2997 /* Convert a DWARF call frame info. operation to its string name */
/* NOTE(review): the return type and the success-path return of NAME are
   missing from this capture; the fallback string is visible below.  */
 3000 dwarf_cfi_name (unsigned int cfi_opc
)
 3002 const char *name
= get_DW_CFA_name (cfi_opc
);
/* Unrecognized opcode: hand back a stable placeholder.  */
 3007 return "DW_CFA_<unknown>";
3010 /* This routine will generate the correct assembly data for a location
 3011 description based on a cfi entry with a complex address. */
/* Emits: [register (DW_CFA_expression only)] ULEB128 size, then the
   location-expression bytes.  NOTE(review): braces, the declarations of
   R and SIZE, and the else keyword are missing from this capture.  */
 3014 output_cfa_loc (dw_cfi_ref cfi
, int for_eh
)
 3016 dw_loc_descr_ref loc
;
/* DW_CFA_expression carries the target register in operand 1 and the
   location expression in operand 2.  */
 3019 if (cfi
->dw_cfi_opc
== DW_CFA_expression
)
 3022 DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
 3023 dw2_asm_output_data (1, r
, NULL
);
 3024 loc
= cfi
->dw_cfi_oprnd2
.dw_cfi_loc
;
/* Other opcodes (e.g. DW_CFA_def_cfa_expression) keep it in operand 1.  */
 3027 loc
= cfi
->dw_cfi_oprnd1
.dw_cfi_loc
;
 3029 /* Output the size of the block. */
 3030 size
= size_of_locs (loc
);
 3031 dw2_asm_output_data_uleb128 (size
, NULL
);
 3033 /* Now output the operations themselves. */
 3034 output_loc_sequence (loc
, for_eh
);
3037 /* Similar, but used for .cfi_escape. */
/* Writes the same byte sequence as output_cfa_loc but as comma-separated
   .cfi_escape operands directly into asm_out_file.  NOTE(review): braces
   and the declarations of R and SIZE are missing from this capture.  */
 3040 output_cfa_loc_raw (dw_cfi_ref cfi
)
 3042 dw_loc_descr_ref loc
;
 3045 if (cfi
->dw_cfi_opc
== DW_CFA_expression
)
 3048 DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
 3049 fprintf (asm_out_file
, "%#x,", r
);
 3050 loc
= cfi
->dw_cfi_oprnd2
.dw_cfi_loc
;
 3053 loc
= cfi
->dw_cfi_oprnd1
.dw_cfi_loc
;
 3055 /* Output the size of the block. */
 3056 size
= size_of_locs (loc
);
 3057 dw2_asm_output_data_uleb128_raw (size
);
 3058 fputc (',', asm_out_file
);
 3060 /* Now output the operations themselves. */
 3061 output_loc_sequence_raw (loc
);
3064 /* Output a Call Frame Information opcode and its operand(s). */
/* Binary emission path (non-directive): opcodes with an embedded operand
   in the low 6 bits (advance_loc/offset/restore) are handled first, then
   a switch emits the operands of the remaining opcodes.  NOTE(review):
   lossy capture -- braces, break statements, the declarations of R and
   OFF, and several case labels are missing; tokens kept verbatim.  */
 3067 output_cfi (dw_cfi_ref cfi
, dw_fde_ref fde
, int for_eh
)
/* DW_CFA_advance_loc packs the delta into the opcode's low 6 bits.  */
 3072 if (cfi
->dw_cfi_opc
== DW_CFA_advance_loc
)
 3073 dw2_asm_output_data (1, (cfi
->dw_cfi_opc
 3074 | (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
& 0x3f)),
 3075 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX
,
 3076 ((unsigned HOST_WIDE_INT
)
 3077 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
));
/* DW_CFA_offset packs the register into the low 6 bits; the offset
   follows as a data-alignment-factored ULEB128.  */
 3078 else if (cfi
->dw_cfi_opc
== DW_CFA_offset
)
 3080 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
 3081 dw2_asm_output_data (1, (cfi
->dw_cfi_opc
| (r
& 0x3f)),
 3082 "DW_CFA_offset, column %#lx", r
);
 3083 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
 3084 dw2_asm_output_data_uleb128 (off
, NULL
);
/* DW_CFA_restore likewise packs the register into the low 6 bits.  */
 3086 else if (cfi
->dw_cfi_opc
== DW_CFA_restore
)
 3088 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
 3089 dw2_asm_output_data (1, (cfi
->dw_cfi_opc
| (r
& 0x3f)),
 3090 "DW_CFA_restore, column %#lx", r
);
/* All other opcodes: one opcode byte, then operands per the switch.  */
 3094 dw2_asm_output_data (1, cfi
->dw_cfi_opc
,
 3095 "%s", dwarf_cfi_name (cfi
->dw_cfi_opc
));
 3097 switch (cfi
->dw_cfi_opc
)
 3099 case DW_CFA_set_loc
:
 3101 dw2_asm_output_encoded_addr_rtx (
 3102 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
 3103 gen_rtx_SYMBOL_REF (Pmode
, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
),
 3106 dw2_asm_output_addr (DWARF2_ADDR_SIZE
,
 3107 cfi
->dw_cfi_oprnd1
.dw_cfi_addr
, NULL
);
/* Advances rebase subsequent deltas on the label just emitted.  */
 3108 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
 3111 case DW_CFA_advance_loc1
:
 3112 dw2_asm_output_delta (1, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
 3113 fde
->dw_fde_current_label
, NULL
);
 3114 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
 3117 case DW_CFA_advance_loc2
:
 3118 dw2_asm_output_delta (2, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
 3119 fde
->dw_fde_current_label
, NULL
);
 3120 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
 3123 case DW_CFA_advance_loc4
:
 3124 dw2_asm_output_delta (4, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
 3125 fde
->dw_fde_current_label
, NULL
);
 3126 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
 3129 case DW_CFA_MIPS_advance_loc8
:
 3130 dw2_asm_output_delta (8, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
 3131 fde
->dw_fde_current_label
, NULL
);
 3132 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
 3135 case DW_CFA_offset_extended
:
 3136 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
 3137 dw2_asm_output_data_uleb128 (r
, NULL
);
 3138 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
 3139 dw2_asm_output_data_uleb128 (off
, NULL
);
 3142 case DW_CFA_def_cfa
:
 3143 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
 3144 dw2_asm_output_data_uleb128 (r
, NULL
);
/* def_cfa's offset is NOT factored by the data alignment.  */
 3145 dw2_asm_output_data_uleb128 (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
, NULL
);
 3148 case DW_CFA_offset_extended_sf
:
 3149 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
 3150 dw2_asm_output_data_uleb128 (r
, NULL
);
 3151 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
 3152 dw2_asm_output_data_sleb128 (off
, NULL
);
 3155 case DW_CFA_def_cfa_sf
:
 3156 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
 3157 dw2_asm_output_data_uleb128 (r
, NULL
);
 3158 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
 3159 dw2_asm_output_data_sleb128 (off
, NULL
);
/* Single-register operand group.  */
 3162 case DW_CFA_restore_extended
:
 3163 case DW_CFA_undefined
:
 3164 case DW_CFA_same_value
:
 3165 case DW_CFA_def_cfa_register
:
 3166 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
 3167 dw2_asm_output_data_uleb128 (r
, NULL
);
 3170 case DW_CFA_register
:
 3171 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
 3172 dw2_asm_output_data_uleb128 (r
, NULL
);
 3173 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
, for_eh
);
 3174 dw2_asm_output_data_uleb128 (r
, NULL
);
 3177 case DW_CFA_def_cfa_offset
:
 3178 case DW_CFA_GNU_args_size
:
 3179 dw2_asm_output_data_uleb128 (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
, NULL
);
 3182 case DW_CFA_def_cfa_offset_sf
:
 3183 off
= div_data_align (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
 3184 dw2_asm_output_data_sleb128 (off
, NULL
);
/* No operands.  */
 3187 case DW_CFA_GNU_window_save
:
 3190 case DW_CFA_def_cfa_expression
:
 3191 case DW_CFA_expression
:
 3192 output_cfa_loc (cfi
, for_eh
);
 3195 case DW_CFA_GNU_negative_offset_extended
:
 3196 /* Obsoleted by DW_CFA_offset_extended_sf. */
3205 /* Similar, but do it via assembler directives instead. */
/* F is either asm_out_file (real .cfi_* output) or a dump file (debug
   text).  NOTE(review): lossy capture -- braces, break statements, the
   default case and a few lines are missing; tokens kept verbatim.  */
 3208 output_cfi_directive (FILE *f
, dw_cfi_ref cfi
)
 3210 unsigned long r
, r2
;
 3212 switch (cfi
->dw_cfi_opc
)
 3214 case DW_CFA_advance_loc
:
 3215 case DW_CFA_advance_loc1
:
 3216 case DW_CFA_advance_loc2
:
 3217 case DW_CFA_advance_loc4
:
 3218 case DW_CFA_MIPS_advance_loc8
:
 3219 case DW_CFA_set_loc
:
 3220 /* Should only be created in a code path not followed when emitting
 3221 via directives. The assembler is going to take care of this for
 3222 us. But this routines is also used for debugging dumps, so
 3224 gcc_assert (f
!= asm_out_file
);
 3225 fprintf (f
, "\t.cfi_advance_loc\n");
 3229 case DW_CFA_offset_extended
:
 3230 case DW_CFA_offset_extended_sf
:
 3231 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
 3232 fprintf (f
, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC
"\n",
 3233 r
, cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
 3236 case DW_CFA_restore
:
 3237 case DW_CFA_restore_extended
:
 3238 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
 3239 fprintf (f
, "\t.cfi_restore %lu\n", r
);
 3242 case DW_CFA_undefined
:
 3243 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
 3244 fprintf (f
, "\t.cfi_undefined %lu\n", r
);
 3247 case DW_CFA_same_value
:
 3248 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
 3249 fprintf (f
, "\t.cfi_same_value %lu\n", r
);
 3252 case DW_CFA_def_cfa
:
 3253 case DW_CFA_def_cfa_sf
:
 3254 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
 3255 fprintf (f
, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC
"\n",
 3256 r
, cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
 3259 case DW_CFA_def_cfa_register
:
 3260 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
 3261 fprintf (f
, "\t.cfi_def_cfa_register %lu\n", r
);
 3264 case DW_CFA_register
:
 3265 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
 3266 r2
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
, 1);
 3267 fprintf (f
, "\t.cfi_register %lu, %lu\n", r
, r2
);
 3270 case DW_CFA_def_cfa_offset
:
 3271 case DW_CFA_def_cfa_offset_sf
:
 3272 fprintf (f
, "\t.cfi_def_cfa_offset "
 3273 HOST_WIDE_INT_PRINT_DEC
"\n",
 3274 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
 3277 case DW_CFA_remember_state
:
 3278 fprintf (f
, "\t.cfi_remember_state\n");
 3280 case DW_CFA_restore_state
:
 3281 fprintf (f
, "\t.cfi_restore_state\n");
/* GAS has no args_size directive, so emit the opcode via .cfi_escape
   when writing real assembly; dumps get a readable pseudo-directive.  */
 3284 case DW_CFA_GNU_args_size
:
 3285 if (f
== asm_out_file
)
 3287 fprintf (f
, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size
);
 3288 dw2_asm_output_data_uleb128_raw (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
 3290 fprintf (f
, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC
,
 3291 ASM_COMMENT_START
, cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
 3296 fprintf (f
, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC
"\n",
 3297 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
 3301 case DW_CFA_GNU_window_save
:
 3302 fprintf (f
, "\t.cfi_window_save\n");
/* Expressions are likewise escaped as raw bytes for the assembler;
   dumps get an elided "..." form.  */
 3305 case DW_CFA_def_cfa_expression
:
 3306 if (f
!= asm_out_file
)
 3308 fprintf (f
, "\t.cfi_def_cfa_expression ...\n");
 3312 case DW_CFA_expression
:
 3313 if (f
!= asm_out_file
)
 3315 fprintf (f
, "\t.cfi_cfa_expression ...\n");
 3318 fprintf (f
, "\t.cfi_escape %#x,", cfi
->dw_cfi_opc
);
 3319 output_cfa_loc_raw (cfi
);
/* Emit the directive form of CFI into the assembly stream, but only when
   CFI-via-directives is enabled for this configuration.  */
 3329 dwarf2out_emit_cfi (dw_cfi_ref cfi
)
 3331 if (dwarf2out_do_cfi_asm ())
 3332 output_cfi_directive (asm_out_file
, cfi
);
/* Debug dump of an abstract CFI row to F: the CFA definition first, then
   every saved-register entry.  NOTE(review): braces, the declarations of
   CFI and I, and the row->cfa_cfi branch are missing from this capture.  */
 3336 dump_cfi_row (FILE *f
, dw_cfi_row
*row
)
/* Synthesize a def_cfa CFI from the row's CFA, diffing against a dummy
   "no CFA" location so the full definition is printed.  */
 3344 dw_cfa_location dummy
;
 3345 memset (&dummy
, 0, sizeof (dummy
));
 3346 dummy
.reg
= INVALID_REGNUM
;
 3347 cfi
= def_cfa_0 (&dummy
, &row
->cfa
);
 3349 output_cfi_directive (f
, cfi
);
 3351 FOR_EACH_VEC_SAFE_ELT (row
->reg_save
, i
, cfi
)
 3353 output_cfi_directive (f
, cfi
);
/* Forward declaration so the debugger-callable helper below is visible.  */
 3356 void debug_cfi_row (dw_cfi_row
*row
);
/* Convenience wrapper for use from a debugger: dump ROW to stderr.  */
 3359 debug_cfi_row (dw_cfi_row
*row
)
 3361 dump_cfi_row (stderr
, row
);
3365 /* Save the result of dwarf2out_do_frame across PCH.
 3366 This variable is tri-state, with 0 unset, >0 true, <0 false. */
/* GTY(()) keeps the cached decision alive across precompiled headers.  */
 3367 static GTY(()) signed char saved_do_cfi_asm
= 0;
3369 /* Decide whether we want to emit frame unwind information for the current
 3370 translation unit. */
/* NOTE(review): the return statements after each condition are missing
   from this capture; each visible test presumably returns true, with a
   final return false -- confirm against the full source.  */
 3373 dwarf2out_do_frame (void)
 3375 /* We want to emit correct CFA location expressions or lists, so we
 3376 have to return true if we're going to output debug info, even if
 3377 we're not going to output frame or unwind info. */
 3378 if (write_symbols
== DWARF2_DEBUG
|| write_symbols
== VMS_AND_DWARF2_DEBUG
)
/* A positive cached CFI-asm decision implies frame output is wanted.  */
 3381 if (saved_do_cfi_asm
> 0)
 3384 if (targetm
.debug_unwind_info () == UI_DWARF2
)
 3387 if ((flag_unwind_tables
|| flag_exceptions
)
 3388 && targetm_common
.except_unwind_info (&global_options
) == UI_DWARF2
)
3394 /* Decide whether to emit frame unwind via assembler directives. */
/* Result is computed once and cached in saved_do_cfi_asm.  NOTE(review):
   the declaration of ENC, the early "return false" statements and the
   final return are missing from this capture.  */
 3397 dwarf2out_do_cfi_asm (void)
/* Cached answer from a previous call (tri-state; 0 means unset).  */
 3401 if (saved_do_cfi_asm
!= 0)
 3402 return saved_do_cfi_asm
> 0;
 3404 /* Assume failure for a moment. */
 3405 saved_do_cfi_asm
= -1;
 3407 if (!flag_dwarf2_cfi_asm
|| !dwarf2out_do_frame ())
 3409 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE
)
 3412 /* Make sure the personality encoding is one the assembler can support.
 3413 In particular, aligned addresses can't be handled. */
 3414 enc
= ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
 3415 if ((enc
& 0x70) != 0 && (enc
& 0x70) != DW_EH_PE_pcrel
)
 3417 enc
= ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
 3418 if ((enc
& 0x70) != 0 && (enc
& 0x70) != DW_EH_PE_pcrel
)
 3421 /* If we can't get the assembler to emit only .debug_frame, and we don't need
 3422 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
 3423 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
 3424 && !flag_unwind_tables
&& !flag_exceptions
 3425 && targetm_common
.except_unwind_info (&global_options
) != UI_DWARF2
)
/* All checks passed: remember success.  */
 3429 saved_do_cfi_asm
= 1;
/* Pass metadata for the dwarf2 CFI-note pass; timed under TV_FINAL since
   it runs just before final.  */
 3435 const pass_data pass_data_dwarf2_frame
=
 3437 RTL_PASS
, /* type */
 3438 "dwarf2", /* name */
 3439 OPTGROUP_NONE
, /* optinfo_flags */
 3440 TV_FINAL
, /* tv_id */
 3441 0, /* properties_required */
 3442 0, /* properties_provided */
 3443 0, /* properties_destroyed */
 3444 0, /* todo_flags_start */
 3445 0, /* todo_flags_finish */
/* RTL pass wrapper that runs execute_dwarf2_frame when gate() allows.
   NOTE(review): the access-specifier lines and opening brace are missing
   from this capture.  */
 3448 class pass_dwarf2_frame
: public rtl_opt_pass
 3451 pass_dwarf2_frame (gcc::context
*ctxt
)
 3452 : rtl_opt_pass (pass_data_dwarf2_frame
, ctxt
)
 3455 /* opt_pass methods: */
 3456 virtual bool gate (function
*);
 3457 virtual unsigned int execute (function
*) { return execute_dwarf2_frame (); }
 3459 }; // class pass_dwarf2_frame
/* Gate: only run the CFI pass when the target uses the generic prologue
   expansion and dwarf2 frame output is requested.  NOTE(review): the
   return type line and the early "return false" are missing here.  */
 3462 pass_dwarf2_frame::gate (function
*)
 3464 /* Targets which still implement the prologue in assembler text
 3465 cannot use the generic dwarf2 unwinding. */
 3466 if (!targetm
.have_prologue ())
 3469 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
 3470 from the optimized shrink-wrapping annotations that we will compute.
 3471 For now, only produce the CFI notes for dwarf2. */
 3472 return dwarf2out_do_frame ();
/* Factory used by the pass manager to instantiate the dwarf2 CFI pass.  */
 3478 make_pass_dwarf2_frame (gcc::context
*ctxt
)
 3480 return new pass_dwarf2_frame (ctxt
);
3483 #include "gt-dwarf2cfi.h"