1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2018 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
22 #include "coretypes.h"
27 #include "tree-pass.h"
31 #include "stor-layout.h"
33 #include "dwarf2out.h"
34 #include "dwarf2asm.h"
35 #include "common/common-target.h"
37 #include "except.h" /* expand_builtin_dwarf_sp_column */
38 #include "profile-count.h" /* For expr.h */
39 #include "expr.h" /* init_return_column_size */
40 #include "output.h" /* asm_out_file */
41 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
44 /* ??? Poison these here until it can be done generically. They've been
45 totally replaced in this file; make sure it stays that way. */
46 #undef DWARF2_UNWIND_INFO
47 #undef DWARF2_FRAME_INFO
48 #if (GCC_VERSION >= 3000)
49 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
52 #ifndef INCOMING_RETURN_ADDR_RTX
53 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
56 #ifndef DEFAULT_INCOMING_FRAME_SP_OFFSET
57 #define DEFAULT_INCOMING_FRAME_SP_OFFSET INCOMING_FRAME_SP_OFFSET
60 /* A collected description of an entire row of the abstract CFI table. */
61 struct GTY(()) dw_cfi_row
63 /* The expression that computes the CFA, expressed in two different ways.
64 The CFA member for the simple cases, and the full CFI expression for
65 the complex cases. The later will be a DW_CFA_cfa_expression. */
69 /* The expressions for any register column that is saved. */
73 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
74 struct GTY(()) reg_saved_in_data
{
80 /* Since we no longer have a proper CFG, we're going to create a facsimile
81 of one on the fly while processing the frame-related insns.
83 We create dw_trace_info structures for each extended basic block beginning
84 and ending at a "save point". Save points are labels, barriers, certain
85 notes, and of course the beginning and end of the function.
87 As we encounter control transfer insns, we propagate the "current"
88 row state across the edges to the starts of traces. When checking is
89 enabled, we validate that we propagate the same data from all sources.
91 All traces are members of the TRACE_INFO array, in the order in which
92 they appear in the instruction stream.
94 All save points are present in the TRACE_INDEX hash, mapping the insn
95 starting a trace to the dw_trace_info describing the trace. */
99 /* The insn that begins the trace. */
102 /* The row state at the beginning and end of the trace. */
103 dw_cfi_row
*beg_row
, *end_row
;
105 /* Tracking for DW_CFA_GNU_args_size. The "true" sizes are those we find
106 while scanning insns. However, the args_size value is irrelevant at
107 any point except can_throw_internal_p insns. Therefore the "delay"
108 sizes the values that must actually be emitted for this trace. */
109 poly_int64_pod beg_true_args_size
, end_true_args_size
;
110 poly_int64_pod beg_delay_args_size
, end_delay_args_size
;
112 /* The first EH insn in the trace, where beg_delay_args_size must be set. */
115 /* The following variables contain data used in interpreting frame related
116 expressions. These are not part of the "real" row state as defined by
117 Dwarf, but it seems like they need to be propagated into a trace in case
118 frame related expressions have been sunk. */
119 /* ??? This seems fragile. These variables are fragments of a larger
120 expression. If we do not keep the entire expression together, we risk
121 not being able to put it together properly. Consider forcing targets
122 to generate self-contained expressions and dropping all of the magic
123 interpretation code in this file. Or at least refusing to shrink wrap
124 any frame related insn that doesn't contain a complete expression. */
126 /* The register used for saving registers to the stack, and its offset
128 dw_cfa_location cfa_store
;
130 /* A temporary register holding an integral value used in adjusting SP
131 or setting up the store_reg. The "offset" field holds the integer
132 value, not an offset. */
133 dw_cfa_location cfa_temp
;
135 /* A set of registers saved in other registers. This is the inverse of
136 the row->reg_save info, if the entry is a DW_CFA_register. This is
137 implemented as a flat array because it normally contains zero or 1
138 entry, depending on the target. IA-64 is the big spender here, using
139 a maximum of 5 entries. */
140 vec
<reg_saved_in_data
> regs_saved_in_regs
;
142 /* An identifier for this trace. Used only for debugging dumps. */
145 /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS. */
146 bool switch_sections
;
148 /* True if we've seen different values incoming to beg_true_args_size. */
149 bool args_size_undefined
;
153 /* Hashtable helpers. */
155 struct trace_info_hasher
: nofree_ptr_hash
<dw_trace_info
>
157 static inline hashval_t
hash (const dw_trace_info
*);
158 static inline bool equal (const dw_trace_info
*, const dw_trace_info
*);
/* Hash a trace by the UID of the insn that heads it; this keys the
   TRACE_INDEX hash table mapping save-point insns to their traces.  */
162 trace_info_hasher::hash (const dw_trace_info
*ti
)
164 return INSN_UID (ti
->head
);
/* Two traces are considered equal iff they begin at the very same insn
   (pointer identity on HEAD is sufficient; each insn heads at most one
   trace).  */
168 trace_info_hasher::equal (const dw_trace_info
*a
, const dw_trace_info
*b
)
170 return a
->head
== b
->head
;
174 /* The variables making up the pseudo-cfg, as described above. */
175 static vec
<dw_trace_info
> trace_info
;
176 static vec
<dw_trace_info
*> trace_work_list
;
177 static hash_table
<trace_info_hasher
> *trace_index
;
179 /* A vector of call frame insns for the CIE. */
182 /* The state of the first row of the FDE table, which includes the
183 state provided by the CIE. */
184 static GTY(()) dw_cfi_row
*cie_cfi_row
;
186 static GTY(()) reg_saved_in_data
*cie_return_save
;
188 static GTY(()) unsigned long dwarf2out_cfi_label_num
;
190 /* The insn after which a new CFI note should be emitted. */
191 static rtx_insn
*add_cfi_insn
;
193 /* When non-null, add_cfi will add the CFI to this vector. */
194 static cfi_vec
*add_cfi_vec
;
196 /* The current instruction trace. */
197 static dw_trace_info
*cur_trace
;
199 /* The current, i.e. most recently generated, row of the CFI table. */
200 static dw_cfi_row
*cur_row
;
202 /* A copy of the current CFA, for use during the processing of a
204 static dw_cfa_location
*cur_cfa
;
206 /* We delay emitting a register save until either (a) we reach the end
207 of the prologue or (b) the register is clobbered. This clusters
208 register saves so that there are fewer pc advances. */
210 struct queued_reg_save
{
213 poly_int64_pod cfa_offset
;
217 static vec
<queued_reg_save
> queued_reg_saves
;
219 /* True if any CFI directives were emitted at the current insn. */
220 static bool any_cfis_emitted
;
222 /* Short-hand for commonly used register numbers. */
223 static unsigned dw_stack_pointer_regnum
;
224 static unsigned dw_frame_pointer_regnum
;
226 /* Hook used by __throw. */
/* Return, as a constant rtx, the DWARF column number of the stack
   pointer as the EH unwinder sees it: the target's DWARF number for
   STACK_POINTER_REGNUM, remapped by DWARF2_FRAME_REG_OUT with
   FOR_EH = 1.  */
229 expand_builtin_dwarf_sp_column (void)
231 unsigned int dwarf_regnum
= DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM
);
232 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum
, 1));
235 /* MEM is a memory reference for the register size table, each element of
236 which has mode MODE. Initialize column C as a return address column. */
239 init_return_column_size (scalar_int_mode mode
, rtx mem
, unsigned int c
)
/* Byte offset of column C within the size table (each slot is MODE
   sized) and the value to store there: the size of a pointer, since a
   return-address column holds a code address.  */
241 HOST_WIDE_INT offset
= c
* GET_MODE_SIZE (mode
);
242 HOST_WIDE_INT size
= GET_MODE_SIZE (Pmode
);
243 emit_move_insn (adjust_address (mem
, mode
, offset
),
244 gen_int_mode (size
, mode
));
247 /* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
248 init_one_dwarf_reg_size to communicate on what has been done by the
251 struct init_one_dwarf_reg_state
253 /* Whether the dwarf return column was initialized. */
254 bool wrote_return_column
;
256 /* For each hard register REGNO, whether init_one_dwarf_reg_size
257 was given REGNO to process already. */
258 bool processed_regno
[FIRST_PSEUDO_REGISTER
];
262 /* Helper for expand_builtin_init_dwarf_reg_sizes. Generate code to
263 initialize the dwarf register size table entry corresponding to register
264 REGNO in REGMODE. TABLE is the table base address, SLOTMODE is the mode to
265 use for the size entry to initialize, and INIT_STATE is the communication
266 datastructure conveying what we're doing to our caller. */
269 void init_one_dwarf_reg_size (int regno
, machine_mode regmode
,
270 rtx table
, machine_mode slotmode
,
271 init_one_dwarf_reg_state
*init_state
)
273 const unsigned int dnum
= DWARF_FRAME_REGNUM (regno
);
274 const unsigned int rnum
= DWARF2_FRAME_REG_OUT (dnum
, 1);
275 const unsigned int dcol
= DWARF_REG_TO_UNWIND_COLUMN (rnum
);
277 poly_int64 slotoffset
= dcol
* GET_MODE_SIZE (slotmode
);
278 poly_int64 regsize
= GET_MODE_SIZE (regmode
);
280 init_state
->processed_regno
[regno
] = true;
282 if (rnum
>= DWARF_FRAME_REGISTERS
)
285 if (dnum
== DWARF_FRAME_RETURN_COLUMN
)
287 if (regmode
== VOIDmode
)
289 init_state
->wrote_return_column
= true;
292 /* ??? When is this true? Should it be a test based on DCOL instead? */
293 if (maybe_lt (slotoffset
, 0))
296 emit_move_insn (adjust_address (table
, slotmode
, slotoffset
),
297 gen_int_mode (regsize
, slotmode
));
300 /* Generate code to initialize the dwarf register size table located
301 at the provided ADDRESS. */
304 expand_builtin_init_dwarf_reg_sizes (tree address
)
307 scalar_int_mode mode
= SCALAR_INT_TYPE_MODE (char_type_node
);
308 rtx addr
= expand_normal (address
);
309 rtx mem
= gen_rtx_MEM (BLKmode
, addr
);
311 init_one_dwarf_reg_state init_state
;
313 memset ((char *)&init_state
, 0, sizeof (init_state
));
315 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
317 machine_mode save_mode
;
320 /* No point in processing a register multiple times. This could happen
321 with register spans, e.g. when a reg is first processed as a piece of
322 a span, then as a register on its own later on. */
324 if (init_state
.processed_regno
[i
])
327 save_mode
= targetm
.dwarf_frame_reg_mode (i
);
328 span
= targetm
.dwarf_register_span (gen_rtx_REG (save_mode
, i
));
331 init_one_dwarf_reg_size (i
, save_mode
, mem
, mode
, &init_state
);
334 for (int si
= 0; si
< XVECLEN (span
, 0); si
++)
336 rtx reg
= XVECEXP (span
, 0, si
);
338 init_one_dwarf_reg_size
339 (REGNO (reg
), GET_MODE (reg
), mem
, mode
, &init_state
);
344 if (!init_state
.wrote_return_column
)
345 init_return_column_size (mode
, mem
, DWARF_FRAME_RETURN_COLUMN
);
347 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
348 init_return_column_size (mode
, mem
, DWARF_ALT_FRAME_RETURN_COLUMN
);
351 targetm
.init_dwarf_reg_sizes_extra (address
);
/* Look up the dw_trace_info whose trace begins at INSN, via the
   TRACE_INDEX hash table keyed on INSN_UID.  (The DUMMY key object is
   set up on lines the extraction dropped — NOTE(review): confirm against
   the full source.)  */
355 static dw_trace_info
*
356 get_trace_info (rtx_insn
*insn
)
360 return trace_index
->find_with_hash (&dummy
, INSN_UID (insn
));
364 save_point_p (rtx_insn
*insn
)
366 /* Labels, except those that are really jump tables. */
368 return inside_basic_block_p (insn
);
370 /* We split traces at the prologue/epilogue notes because those
371 are points at which the unwind info is usually stable. This
372 makes it easier to find spots with identical unwind info so
373 that we can use remember/restore_state opcodes. */
375 switch (NOTE_KIND (insn
))
377 case NOTE_INSN_PROLOGUE_END
:
378 case NOTE_INSN_EPILOGUE_BEG
:
385 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
/* Divide OFF by DWARF_CIE_DATA_ALIGNMENT; the assertion enforces that
   the division is exact (no remainder), since factored DWARF offsets
   must reconstruct the original value.  */
387 static inline HOST_WIDE_INT
388 div_data_align (HOST_WIDE_INT off
)
390 HOST_WIDE_INT r
= off
/ DWARF_CIE_DATA_ALIGNMENT
;
391 gcc_assert (r
* DWARF_CIE_DATA_ALIGNMENT
== off
);
395 /* Return true if we need a signed version of a given opcode
396 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
/* The factored offset OFF / DWARF_CIE_DATA_ALIGNMENT is negative —
   and hence needs a signed (_sf) opcode — exactly when OFF and the
   data alignment have opposite signs.  */
399 need_data_align_sf_opcode (HOST_WIDE_INT off
)
401 return DWARF_CIE_DATA_ALIGNMENT
< 0 ? off
> 0 : off
< 0;
404 /* Return a pointer to a newly allocated Call Frame Instruction. */
/* GC-allocate a fresh dw_cfi_node and zero both operand slots so the
   caller only needs to fill in the fields relevant to its opcode.  */
406 static inline dw_cfi_ref
409 dw_cfi_ref cfi
= ggc_alloc
<dw_cfi_node
> ();
411 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= 0;
412 cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
= 0;
417 /* Return a newly allocated CFI row, with no defined data. */
/* GC-allocate a zero-cleared CFI row and mark its CFA register as
   INVALID_REGNUM, i.e. "no CFA defined yet".  */
422 dw_cfi_row
*row
= ggc_cleared_alloc
<dw_cfi_row
> ();
424 row
->cfa
.reg
= INVALID_REGNUM
;
429 /* Return a copy of an existing CFI row. */
/* Deep-ish copy of a CFI row: the reg_save vector is duplicated with
   vec_safe_copy so later updates to the copy do not disturb SRC.
   (The shallow struct copy of *SRC itself sits on a dropped line —
   NOTE(review): verify against the full source.)  */
432 copy_cfi_row (dw_cfi_row
*src
)
434 dw_cfi_row
*dst
= ggc_alloc
<dw_cfi_row
> ();
437 dst
->reg_save
= vec_safe_copy (src
->reg_save
);
442 /* Return a copy of an existing CFA location. */
/* GC-allocate a copy of CFA location SRC; used to cache a CFA that is
   too complex (e.g. polynomial offsets) to reconstruct later.  */
444 static dw_cfa_location
*
445 copy_cfa (dw_cfa_location
*src
)
447 dw_cfa_location
*dst
= ggc_alloc
<dw_cfa_location
> ();
452 /* Generate a new label for the CFI info to refer to. */
/* Generate the next internal "LCFI" label from the global counter
   DWARF2OUT_CFI_LABEL_NUM and return a heap copy (xstrdup) — callers
   own and may keep the string.  */
455 dwarf2out_cfi_label (void)
457 int num
= dwarf2out_cfi_label_num
++;
460 ASM_GENERATE_INTERNAL_LABEL (label
, "LCFI", num
);
462 return xstrdup (label
);
465 /* Add CFI either to the current insn stream or to a vector, or both. */
468 add_cfi (dw_cfi_ref cfi
)
/* Record that something was emitted at the current insn (consulted by
   callers to decide whether flushing occurred).  */
470 any_cfis_emitted
= true;
/* Emission into the insn stream: append a NOTE_INSN_CFI after
   ADD_CFI_INSN, attach CFI to it, and advance ADD_CFI_INSN so
   subsequent CFIs keep their order.  */
472 if (add_cfi_insn
!= NULL
)
474 add_cfi_insn
= emit_note_after (NOTE_INSN_CFI
, add_cfi_insn
);
475 NOTE_CFI (add_cfi_insn
) = cfi
;
/* Emission into a vector (e.g. the CIE instruction list): both
   destinations may be active at once.  */
478 if (add_cfi_vec
!= NULL
)
479 vec_safe_push (*add_cfi_vec
, cfi
);
/* Emit a DW_CFA_GNU_args_size opcode recording SIZE bytes of outgoing
   arguments.  SIZE must be a compile-time constant (to_constant) and
   non-negative at any point where an opcode is actually needed.  */
483 add_cfi_args_size (poly_int64 size
)
485 /* We don't yet have a representation for polynomial sizes. */
486 HOST_WIDE_INT const_size
= size
.to_constant ();
488 dw_cfi_ref cfi
= new_cfi ();
490 /* While we can occasionally have args_size < 0 internally, this state
491 should not persist at a point we actually need an opcode. */
492 gcc_assert (const_size
>= 0);
494 cfi
->dw_cfi_opc
= DW_CFA_GNU_args_size
;
495 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
= const_size
;
/* Emit a restore opcode for column REG.  DW_CFA_restore packs the
   register into the opcode's low 6 bits, so registers above 0x3f need
   the extended form.  */
501 add_cfi_restore (unsigned reg
)
503 dw_cfi_ref cfi
= new_cfi ();
505 cfi
->dw_cfi_opc
= (reg
& ~0x3f ? DW_CFA_restore_extended
: DW_CFA_restore
);
506 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
511 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
512 that the register column is no longer saved. */
515 update_row_reg_save (dw_cfi_row
*row
, unsigned column
, dw_cfi_ref cfi
)
/* Grow the reg_save vector on demand (cleared, so untouched columns
   read as NULL = "not saved") before storing the new entry.  */
517 if (vec_safe_length (row
->reg_save
) <= column
)
518 vec_safe_grow_cleared (row
->reg_save
, column
+ 1);
519 (*row
->reg_save
)[column
] = cfi
;
522 /* This function fills in aa dw_cfa_location structure from a dwarf location
523 descriptor sequence. */
526 get_cfa_from_loc_descr (dw_cfa_location
*cfa
, struct dw_loc_descr_node
*loc
)
528 struct dw_loc_descr_node
*ptr
;
530 cfa
->base_offset
= 0;
534 for (ptr
= loc
; ptr
!= NULL
; ptr
= ptr
->dw_loc_next
)
536 enum dwarf_location_atom op
= ptr
->dw_loc_opc
;
572 cfa
->reg
= op
- DW_OP_reg0
;
575 cfa
->reg
= ptr
->dw_loc_oprnd1
.v
.val_int
;
609 cfa
->reg
= op
- DW_OP_breg0
;
610 cfa
->base_offset
= ptr
->dw_loc_oprnd1
.v
.val_int
;
613 cfa
->reg
= ptr
->dw_loc_oprnd1
.v
.val_int
;
614 cfa
->base_offset
= ptr
->dw_loc_oprnd2
.v
.val_int
;
619 case DW_OP_plus_uconst
:
620 cfa
->offset
= ptr
->dw_loc_oprnd1
.v
.val_unsigned
;
628 /* Find the previous value for the CFA, iteratively. CFI is the opcode
629 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
630 one level of remember/restore state processing. */
633 lookup_cfa_1 (dw_cfi_ref cfi
, dw_cfa_location
*loc
, dw_cfa_location
*remember
)
635 switch (cfi
->dw_cfi_opc
)
637 case DW_CFA_def_cfa_offset
:
638 case DW_CFA_def_cfa_offset_sf
:
639 loc
->offset
= cfi
->dw_cfi_oprnd1
.dw_cfi_offset
;
641 case DW_CFA_def_cfa_register
:
642 loc
->reg
= cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
;
645 case DW_CFA_def_cfa_sf
:
646 loc
->reg
= cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
;
647 loc
->offset
= cfi
->dw_cfi_oprnd2
.dw_cfi_offset
;
649 case DW_CFA_def_cfa_expression
:
650 if (cfi
->dw_cfi_oprnd2
.dw_cfi_cfa_loc
)
651 *loc
= *cfi
->dw_cfi_oprnd2
.dw_cfi_cfa_loc
;
653 get_cfa_from_loc_descr (loc
, cfi
->dw_cfi_oprnd1
.dw_cfi_loc
);
656 case DW_CFA_remember_state
:
657 gcc_assert (!remember
->in_use
);
659 remember
->in_use
= 1;
661 case DW_CFA_restore_state
:
662 gcc_assert (remember
->in_use
);
664 remember
->in_use
= 0;
672 /* Determine if two dw_cfa_location structures define the same data. */
/* Two CFA locations are equal when register, offset and indirection
   flag all match; BASE_OFFSET only participates when the location is
   actually indirect.  Poly-int offsets are compared with known_eq.  */
675 cfa_equal_p (const dw_cfa_location
*loc1
, const dw_cfa_location
*loc2
)
677 return (loc1
->reg
== loc2
->reg
678 && known_eq (loc1
->offset
, loc2
->offset
)
679 && loc1
->indirect
== loc2
->indirect
680 && (loc1
->indirect
== 0
681 || known_eq (loc1
->base_offset
, loc2
->base_offset
)));
684 /* Determine if two CFI operands are identical. */
687 cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t
, dw_cfi_oprnd
*a
, dw_cfi_oprnd
*b
)
691 case dw_cfi_oprnd_unused
:
693 case dw_cfi_oprnd_reg_num
:
694 return a
->dw_cfi_reg_num
== b
->dw_cfi_reg_num
;
695 case dw_cfi_oprnd_offset
:
696 return a
->dw_cfi_offset
== b
->dw_cfi_offset
;
697 case dw_cfi_oprnd_addr
:
698 return (a
->dw_cfi_addr
== b
->dw_cfi_addr
699 || strcmp (a
->dw_cfi_addr
, b
->dw_cfi_addr
) == 0);
700 case dw_cfi_oprnd_loc
:
701 return loc_descr_equal_p (a
->dw_cfi_loc
, b
->dw_cfi_loc
);
702 case dw_cfi_oprnd_cfa_loc
:
703 return cfa_equal_p (a
->dw_cfi_cfa_loc
, b
->dw_cfi_cfa_loc
);
708 /* Determine if two CFI entries are identical. */
711 cfi_equal_p (dw_cfi_ref a
, dw_cfi_ref b
)
713 enum dwarf_call_frame_info opc
;
715 /* Make things easier for our callers, including missing operands. */
718 if (a
== NULL
|| b
== NULL
)
721 /* Obviously, the opcodes must match. */
723 if (opc
!= b
->dw_cfi_opc
)
726 /* Compare the two operands, re-using the type of the operands as
727 already exposed elsewhere. */
728 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc
),
729 &a
->dw_cfi_oprnd1
, &b
->dw_cfi_oprnd1
)
730 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc
),
731 &a
->dw_cfi_oprnd2
, &b
->dw_cfi_oprnd2
));
734 /* Determine if two CFI_ROW structures are identical. */
737 cfi_row_equal_p (dw_cfi_row
*a
, dw_cfi_row
*b
)
739 size_t i
, n_a
, n_b
, n_max
;
743 if (!cfi_equal_p (a
->cfa_cfi
, b
->cfa_cfi
))
746 else if (!cfa_equal_p (&a
->cfa
, &b
->cfa
))
749 n_a
= vec_safe_length (a
->reg_save
);
750 n_b
= vec_safe_length (b
->reg_save
);
751 n_max
= MAX (n_a
, n_b
);
753 for (i
= 0; i
< n_max
; ++i
)
755 dw_cfi_ref r_a
= NULL
, r_b
= NULL
;
758 r_a
= (*a
->reg_save
)[i
];
760 r_b
= (*b
->reg_save
)[i
];
762 if (!cfi_equal_p (r_a
, r_b
))
769 /* The CFA is now calculated from NEW_CFA. Consider OLD_CFA in determining
770 what opcode to emit. Returns the CFI opcode to effect the change, or
771 NULL if NEW_CFA == OLD_CFA. */
774 def_cfa_0 (dw_cfa_location
*old_cfa
, dw_cfa_location
*new_cfa
)
778 /* If nothing changed, no need to issue any call frame instructions. */
779 if (cfa_equal_p (old_cfa
, new_cfa
))
784 HOST_WIDE_INT const_offset
;
785 if (new_cfa
->reg
== old_cfa
->reg
786 && !new_cfa
->indirect
787 && !old_cfa
->indirect
788 && new_cfa
->offset
.is_constant (&const_offset
))
790 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
791 the CFA register did not change but the offset did. The data
792 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
793 in the assembler via the .cfi_def_cfa_offset directive. */
794 if (const_offset
< 0)
795 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_offset_sf
;
797 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_offset
;
798 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
= const_offset
;
800 else if (new_cfa
->offset
.is_constant ()
801 && known_eq (new_cfa
->offset
, old_cfa
->offset
)
802 && old_cfa
->reg
!= INVALID_REGNUM
803 && !new_cfa
->indirect
804 && !old_cfa
->indirect
)
806 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
807 indicating the CFA register has changed to <register> but the
808 offset has not changed. This requires the old CFA to have
809 been set as a register plus offset rather than a general
810 DW_CFA_def_cfa_expression. */
811 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_register
;
812 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= new_cfa
->reg
;
814 else if (new_cfa
->indirect
== 0
815 && new_cfa
->offset
.is_constant (&const_offset
))
817 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
818 indicating the CFA register has changed to <register> with
819 the specified offset. The data factoring for DW_CFA_def_cfa_sf
820 happens in output_cfi, or in the assembler via the .cfi_def_cfa
822 if (const_offset
< 0)
823 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_sf
;
825 cfi
->dw_cfi_opc
= DW_CFA_def_cfa
;
826 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= new_cfa
->reg
;
827 cfi
->dw_cfi_oprnd2
.dw_cfi_offset
= const_offset
;
831 /* Construct a DW_CFA_def_cfa_expression instruction to
832 calculate the CFA using a full location expression since no
833 register-offset pair is available. */
834 struct dw_loc_descr_node
*loc_list
;
836 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_expression
;
837 loc_list
= build_cfa_loc (new_cfa
, 0);
838 cfi
->dw_cfi_oprnd1
.dw_cfi_loc
= loc_list
;
839 if (!new_cfa
->offset
.is_constant ()
840 || !new_cfa
->base_offset
.is_constant ())
841 /* It's hard to reconstruct the CFA location for a polynomial
842 expression, so just cache it instead. */
843 cfi
->dw_cfi_oprnd2
.dw_cfi_cfa_loc
= copy_cfa (new_cfa
);
845 cfi
->dw_cfi_oprnd2
.dw_cfi_cfa_loc
= NULL
;
851 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
854 def_cfa_1 (dw_cfa_location
*new_cfa
)
858 if (cur_trace
->cfa_store
.reg
== new_cfa
->reg
&& new_cfa
->indirect
== 0)
859 cur_trace
->cfa_store
.offset
= new_cfa
->offset
;
861 cfi
= def_cfa_0 (&cur_row
->cfa
, new_cfa
);
864 cur_row
->cfa
= *new_cfa
;
865 cur_row
->cfa_cfi
= (cfi
->dw_cfi_opc
== DW_CFA_def_cfa_expression
872 /* Add the CFI for saving a register. REG is the CFA column number.
873 If SREG is -1, the register is saved at OFFSET from the CFA;
874 otherwise it is saved in SREG. */
877 reg_save (unsigned int reg
, unsigned int sreg
, poly_int64 offset
)
879 dw_fde_ref fde
= cfun
? cfun
->fde
: NULL
;
880 dw_cfi_ref cfi
= new_cfi ();
882 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
884 if (sreg
== INVALID_REGNUM
)
886 HOST_WIDE_INT const_offset
;
887 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
888 if (fde
&& fde
->stack_realign
)
890 cfi
->dw_cfi_opc
= DW_CFA_expression
;
891 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
892 cfi
->dw_cfi_oprnd2
.dw_cfi_loc
893 = build_cfa_aligned_loc (&cur_row
->cfa
, offset
,
894 fde
->stack_realignment
);
896 else if (offset
.is_constant (&const_offset
))
898 if (need_data_align_sf_opcode (const_offset
))
899 cfi
->dw_cfi_opc
= DW_CFA_offset_extended_sf
;
900 else if (reg
& ~0x3f)
901 cfi
->dw_cfi_opc
= DW_CFA_offset_extended
;
903 cfi
->dw_cfi_opc
= DW_CFA_offset
;
904 cfi
->dw_cfi_oprnd2
.dw_cfi_offset
= const_offset
;
908 cfi
->dw_cfi_opc
= DW_CFA_expression
;
909 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
910 cfi
->dw_cfi_oprnd2
.dw_cfi_loc
911 = build_cfa_loc (&cur_row
->cfa
, offset
);
914 else if (sreg
== reg
)
916 /* While we could emit something like DW_CFA_same_value or
917 DW_CFA_restore, we never expect to see something like that
918 in a prologue. This is more likely to be a bug. A backend
919 can always bypass this by using REG_CFA_RESTORE directly. */
924 cfi
->dw_cfi_opc
= DW_CFA_register
;
925 cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
= sreg
;
929 update_row_reg_save (cur_row
, reg
, cfi
);
932 /* A subroutine of scan_trace. Check INSN for a REG_ARGS_SIZE note
933 and adjust data structures to match. */
936 notice_args_size (rtx_insn
*insn
)
938 poly_int64 args_size
, delta
;
/* Only insns carrying a REG_ARGS_SIZE note matter; the early-return
   for a missing note sits on a dropped line.  */
941 note
= find_reg_note (insn
, REG_ARGS_SIZE
, NULL
);
/* DELTA is the change relative to what this trace last recorded; a
   zero delta means nothing to update.  */
945 args_size
= get_args_size (note
);
946 delta
= args_size
- cur_trace
->end_true_args_size
;
947 if (known_eq (delta
, 0))
950 cur_trace
->end_true_args_size
= args_size
;
952 /* If the CFA is computed off the stack pointer, then we must adjust
953 the computation of the CFA as well. */
954 if (cur_cfa
->reg
== dw_stack_pointer_regnum
)
956 gcc_assert (!cur_cfa
->indirect
);
958 /* Convert a change in args_size (always a positive in the
959 direction of stack growth) to a change in stack pointer. */
960 if (!STACK_GROWS_DOWNWARD
)
963 cur_cfa
->offset
+= delta
;
967 /* A subroutine of scan_trace. INSN is can_throw_internal. Update the
968 data within the trace related to EH insns and args_size. */
971 notice_eh_throw (rtx_insn
*insn
)
973 poly_int64 args_size
= cur_trace
->end_true_args_size
;
/* First EH insn in this trace: remember it and seed both delayed
   args_size bounds from the current true size.  */
974 if (cur_trace
->eh_head
== NULL
)
976 cur_trace
->eh_head
= insn
;
977 cur_trace
->beg_delay_args_size
= args_size
;
978 cur_trace
->end_delay_args_size
= args_size
;
/* Subsequent EH insn with a changed size: update the delayed end and
   emit the opcode now, at the point that can throw.  */
980 else if (maybe_ne (cur_trace
->end_delay_args_size
, args_size
))
982 cur_trace
->end_delay_args_size
= args_size
;
984 /* ??? If the CFA is the stack pointer, search backward for the last
985 CFI note and insert there. Given that the stack changed for the
986 args_size change, there *must* be such a note in between here and
988 add_cfi_args_size (args_size
);
992 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
993 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
994 used in places where rtl is prohibited. */
/* Map a hard-register rtx to its DWARF frame column.  Pseudo registers
   must never reach the CFI machinery, hence the assert.  */
996 static inline unsigned
997 dwf_regno (const_rtx reg
)
999 gcc_assert (REGNO (reg
) < FIRST_PSEUDO_REGISTER
);
1000 return DWARF_FRAME_REGNUM (REGNO (reg
));
1003 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1006 compare_reg_or_pc (rtx x
, rtx y
)
/* Two REGs compare by hard register number; the non-REG (PC_RTX) case
   is handled on a line the extraction dropped — presumably by rtx
   identity, NOTE(review): confirm against the full source.  */
1008 if (REG_P (x
) && REG_P (y
))
1009 return REGNO (x
) == REGNO (y
);
1013 /* Record SRC as being saved in DEST. DEST may be null to delete an
1014 existing entry. SRC may be a register or PC_RTX. */
1017 record_reg_saved_in_reg (rtx dest
, rtx src
)
1019 reg_saved_in_data
*elt
;
1022 FOR_EACH_VEC_ELT (cur_trace
->regs_saved_in_regs
, i
, elt
)
1023 if (compare_reg_or_pc (elt
->orig_reg
, src
))
1026 cur_trace
->regs_saved_in_regs
.unordered_remove (i
);
1028 elt
->saved_in_reg
= dest
;
1035 reg_saved_in_data e
= {src
, dest
};
1036 cur_trace
->regs_saved_in_regs
.safe_push (e
);
1039 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1040 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1043 queue_reg_save (rtx reg
, rtx sreg
, poly_int64 offset
)
1046 queued_reg_save e
= {reg
, sreg
, offset
};
1049 /* Duplicates waste space, but it's also necessary to remove them
1050 for correctness, since the queue gets output in reverse order. */
1051 FOR_EACH_VEC_ELT (queued_reg_saves
, i
, q
)
1052 if (compare_reg_or_pc (q
->reg
, reg
))
1058 queued_reg_saves
.safe_push (e
);
1061 /* Output all the entries in QUEUED_REG_SAVES. */
1064 dwarf2out_flush_queued_reg_saves (void)
1069 FOR_EACH_VEC_ELT (queued_reg_saves
, i
, q
)
1071 unsigned int reg
, sreg
;
1073 record_reg_saved_in_reg (q
->saved_reg
, q
->reg
);
1075 if (q
->reg
== pc_rtx
)
1076 reg
= DWARF_FRAME_RETURN_COLUMN
;
1078 reg
= dwf_regno (q
->reg
);
1080 sreg
= dwf_regno (q
->saved_reg
);
1082 sreg
= INVALID_REGNUM
;
1083 reg_save (reg
, sreg
, q
->cfa_offset
);
1086 queued_reg_saves
.truncate (0);
1089 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1090 location for? Or, does it clobber a register which we've previously
1091 said that some other register is saved in, and for which we now
1092 have a new location for? */
1095 clobbers_queued_reg_save (const_rtx insn
)
1100 FOR_EACH_VEC_ELT (queued_reg_saves
, iq
, q
)
1103 reg_saved_in_data
*rir
;
1105 if (modified_in_p (q
->reg
, insn
))
1108 FOR_EACH_VEC_ELT (cur_trace
->regs_saved_in_regs
, ir
, rir
)
1109 if (compare_reg_or_pc (q
->reg
, rir
->orig_reg
)
1110 && modified_in_p (rir
->saved_in_reg
, insn
))
1117 /* What register, if any, is currently saved in REG? */
1120 reg_saved_in (rtx reg
)
1122 unsigned int regn
= REGNO (reg
);
1124 reg_saved_in_data
*rir
;
1127 FOR_EACH_VEC_ELT (queued_reg_saves
, i
, q
)
1128 if (q
->saved_reg
&& regn
== REGNO (q
->saved_reg
))
1131 FOR_EACH_VEC_ELT (cur_trace
->regs_saved_in_regs
, i
, rir
)
1132 if (regn
== REGNO (rir
->saved_in_reg
))
1133 return rir
->orig_reg
;
1138 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1141 dwarf2out_frame_debug_def_cfa (rtx pat
)
1143 memset (cur_cfa
, 0, sizeof (*cur_cfa
));
1145 pat
= strip_offset (pat
, &cur_cfa
->offset
);
1148 cur_cfa
->indirect
= 1;
1149 pat
= strip_offset (XEXP (pat
, 0), &cur_cfa
->base_offset
);
1151 /* ??? If this fails, we could be calling into the _loc functions to
1152 define a full expression. So far no port does that. */
1153 gcc_assert (REG_P (pat
));
1154 cur_cfa
->reg
= dwf_regno (pat
);
1157 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1160 dwarf2out_frame_debug_adjust_cfa (rtx pat
)
1164 gcc_assert (GET_CODE (pat
) == SET
);
1165 dest
= XEXP (pat
, 0);
1166 src
= XEXP (pat
, 1);
1168 switch (GET_CODE (src
))
1171 gcc_assert (dwf_regno (XEXP (src
, 0)) == cur_cfa
->reg
);
1172 cur_cfa
->offset
-= rtx_to_poly_int64 (XEXP (src
, 1));
1182 cur_cfa
->reg
= dwf_regno (dest
);
1183 gcc_assert (cur_cfa
->indirect
== 0);
1186 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1189 dwarf2out_frame_debug_cfa_offset (rtx set
)
1192 rtx src
, addr
, span
;
1193 unsigned int sregno
;
1195 src
= XEXP (set
, 1);
1196 addr
= XEXP (set
, 0);
1197 gcc_assert (MEM_P (addr
));
1198 addr
= XEXP (addr
, 0);
1200 /* As documented, only consider extremely simple addresses. */
1201 switch (GET_CODE (addr
))
1204 gcc_assert (dwf_regno (addr
) == cur_cfa
->reg
);
1205 offset
= -cur_cfa
->offset
;
1208 gcc_assert (dwf_regno (XEXP (addr
, 0)) == cur_cfa
->reg
);
1209 offset
= rtx_to_poly_int64 (XEXP (addr
, 1)) - cur_cfa
->offset
;
1218 sregno
= DWARF_FRAME_RETURN_COLUMN
;
1222 span
= targetm
.dwarf_register_span (src
);
1223 sregno
= dwf_regno (src
);
1226 /* ??? We'd like to use queue_reg_save, but we need to come up with
1227 a different flushing heuristic for epilogues. */
1229 reg_save (sregno
, INVALID_REGNUM
, offset
);
1232 /* We have a PARALLEL describing where the contents of SRC live.
1233 Adjust the offset for each piece of the PARALLEL. */
1234 poly_int64 span_offset
= offset
;
1236 gcc_assert (GET_CODE (span
) == PARALLEL
);
1238 const int par_len
= XVECLEN (span
, 0);
1239 for (int par_index
= 0; par_index
< par_len
; par_index
++)
1241 rtx elem
= XVECEXP (span
, 0, par_index
);
1242 sregno
= dwf_regno (src
);
1243 reg_save (sregno
, INVALID_REGNUM
, span_offset
);
1244 span_offset
+= GET_MODE_SIZE (GET_MODE (elem
));
1249 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1252 dwarf2out_frame_debug_cfa_register (rtx set
)
1255 unsigned sregno
, dregno
;
1257 src
= XEXP (set
, 1);
1258 dest
= XEXP (set
, 0);
/* Track the inverse mapping (DEST now holds SRC) for later lookups
   via reg_saved_in.  */
1260 record_reg_saved_in_reg (dest
, src
);
/* SRC == PC_RTX means the return address column; the test itself sits
   on a dropped line.  */
1262 sregno
= DWARF_FRAME_RETURN_COLUMN
;
1264 sregno
= dwf_regno (src
);
1266 dregno
= dwf_regno (dest
);
1268 /* ??? We'd like to use queue_reg_save, but we need to come up with
1269 a different flushing heuristic for epilogues. */
/* Offset 0 is ignored since a destination register (DREGNO) is given:
   this emits a DW_CFA_register-style save, not a CFA-offset save.  */
1270 reg_save (sregno
, dregno
, 0);
1273 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1276 dwarf2out_frame_debug_cfa_expression (rtx set
)
1278 rtx src
, dest
, span
;
1279 dw_cfi_ref cfi
= new_cfi ();
1282 dest
= SET_DEST (set
);
1283 src
= SET_SRC (set
);
1285 gcc_assert (REG_P (src
));
1286 gcc_assert (MEM_P (dest
));
1288 span
= targetm
.dwarf_register_span (src
);
1291 regno
= dwf_regno (src
);
1293 cfi
->dw_cfi_opc
= DW_CFA_expression
;
1294 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= regno
;
1295 cfi
->dw_cfi_oprnd2
.dw_cfi_loc
1296 = mem_loc_descriptor (XEXP (dest
, 0), get_address_mode (dest
),
1297 GET_MODE (dest
), VAR_INIT_STATUS_INITIALIZED
);
1299 /* ??? We'd like to use queue_reg_save, were the interface different,
1300 and, as above, we could manage flushing for epilogues. */
1302 update_row_reg_save (cur_row
, regno
, cfi
);
1305 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_VAL_EXPRESSION
1309 dwarf2out_frame_debug_cfa_val_expression (rtx set
)
1311 rtx dest
= SET_DEST (set
);
1312 gcc_assert (REG_P (dest
));
1314 rtx span
= targetm
.dwarf_register_span (dest
);
1317 rtx src
= SET_SRC (set
);
1318 dw_cfi_ref cfi
= new_cfi ();
1319 cfi
->dw_cfi_opc
= DW_CFA_val_expression
;
1320 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= dwf_regno (dest
);
1321 cfi
->dw_cfi_oprnd2
.dw_cfi_loc
1322 = mem_loc_descriptor (src
, GET_MODE (src
),
1323 GET_MODE (dest
), VAR_INIT_STATUS_INITIALIZED
);
1325 update_row_reg_save (cur_row
, dwf_regno (dest
), cfi
);
1328 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1331 dwarf2out_frame_debug_cfa_restore (rtx reg
)
1333 gcc_assert (REG_P (reg
));
1335 rtx span
= targetm
.dwarf_register_span (reg
);
1338 unsigned int regno
= dwf_regno (reg
);
1339 add_cfi_restore (regno
);
1340 update_row_reg_save (cur_row
, regno
, NULL
);
1344 /* We have a PARALLEL describing where the contents of REG live.
1345 Restore the register for each piece of the PARALLEL. */
1346 gcc_assert (GET_CODE (span
) == PARALLEL
);
1348 const int par_len
= XVECLEN (span
, 0);
1349 for (int par_index
= 0; par_index
< par_len
; par_index
++)
1351 reg
= XVECEXP (span
, 0, par_index
);
1352 gcc_assert (REG_P (reg
));
1353 unsigned int regno
= dwf_regno (reg
);
1354 add_cfi_restore (regno
);
1355 update_row_reg_save (cur_row
, regno
, NULL
);
1360 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1361 ??? Perhaps we should note in the CIE where windows are saved (instead of
1362 assuming 0(cfa)) and what registers are in the window. */
1365 dwarf2out_frame_debug_cfa_window_save (void)
1367 dw_cfi_ref cfi
= new_cfi ();
1369 cfi
->dw_cfi_opc
= DW_CFA_GNU_window_save
;
1373 /* Record call frame debugging information for an expression EXPR,
1374 which either sets SP or FP (adjusting how we calculate the frame
1375 address) or saves a register to the stack or another register.
1376 LABEL indicates the address of EXPR.
1378 This function encodes a state machine mapping rtxes to actions on
1379 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1380 users need not read the source code.
1382 The High-Level Picture
1384 Changes in the register we use to calculate the CFA: Currently we
1385 assume that if you copy the CFA register into another register, we
1386 should take the other one as the new CFA register; this seems to
1387 work pretty well. If it's wrong for some target, it's simple
1388 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1390 Changes in the register we use for saving registers to the stack:
1391 This is usually SP, but not always. Again, we deduce that if you
1392 copy SP into another register (and SP is not the CFA register),
1393 then the new register is the one we will be using for register
1394 saves. This also seems to work.
1396 Register saves: There's not much guesswork about this one; if
1397 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1398 register save, and the register used to calculate the destination
1399 had better be the one we think we're using for this purpose.
1400 It's also assumed that a copy from a call-saved register to another
1401 register is saving that register if RTX_FRAME_RELATED_P is set on
1402 that instruction. If the copy is from a call-saved register to
1403 the *same* register, that means that the register is now the same
1404 value as in the caller.
1406 Except: If the register being saved is the CFA register, and the
1407 offset is nonzero, we are saving the CFA, so we assume we have to
1408 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1409 the intent is to save the value of SP from the previous frame.
1411 In addition, if a register has previously been saved to a different
1414 Invariants / Summaries of Rules
1416 cfa current rule for calculating the CFA. It usually
1417 consists of a register and an offset. This is
1418 actually stored in *cur_cfa, but abbreviated
1419 for the purposes of this documentation.
1420 cfa_store register used by prologue code to save things to the stack
1421 cfa_store.offset is the offset from the value of
1422 cfa_store.reg to the actual CFA
1423 cfa_temp register holding an integral value. cfa_temp.offset
1424 stores the value, which will be used to adjust the
1425 stack pointer. cfa_temp is also used like cfa_store,
1426 to track stores to the stack via fp or a temp reg.
1428 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1429 with cfa.reg as the first operand changes the cfa.reg and its
1430 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1433 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1434 expression yielding a constant. This sets cfa_temp.reg
1435 and cfa_temp.offset.
1437 Rule 5: Create a new register cfa_store used to save items to the
1440 Rules 10-14: Save a register to the stack. Define offset as the
1441 difference of the original location and cfa_store's
1442 location (or cfa_temp's location if cfa_temp is used).
1444 Rules 16-20: If AND operation happens on sp in prologue, we assume
1445 stack is realigned. We will use a group of DW_OP_XXX
1446 expressions to represent the location of the stored
1447 register instead of CFA+offset.
1451 "{a,b}" indicates a choice of a xor b.
1452 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1455 (set <reg1> <reg2>:cfa.reg)
1456 effects: cfa.reg = <reg1>
1457 cfa.offset unchanged
1458 cfa_temp.reg = <reg1>
1459 cfa_temp.offset = cfa.offset
1462 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1463 {<const_int>,<reg>:cfa_temp.reg}))
1464 effects: cfa.reg = sp if fp used
1465 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1466 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1467 if cfa_store.reg==sp
1470 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1471 effects: cfa.reg = fp
1472 cfa_offset += +/- <const_int>
1475 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1476 constraints: <reg1> != fp
1478 effects: cfa.reg = <reg1>
1479 cfa_temp.reg = <reg1>
1480 cfa_temp.offset = cfa.offset
1483 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1484 constraints: <reg1> != fp
1486 effects: cfa_store.reg = <reg1>
1487 cfa_store.offset = cfa.offset - cfa_temp.offset
1490 (set <reg> <const_int>)
1491 effects: cfa_temp.reg = <reg>
1492 cfa_temp.offset = <const_int>
1495 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1496 effects: cfa_temp.reg = <reg1>
1497 cfa_temp.offset |= <const_int>
1500 (set <reg> (high <exp>))
1504 (set <reg> (lo_sum <exp> <const_int>))
1505 effects: cfa_temp.reg = <reg>
1506 cfa_temp.offset = <const_int>
1509 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1510 effects: cfa_store.offset -= <const_int>
1511 cfa.offset = cfa_store.offset if cfa.reg == sp
1513 cfa.base_offset = -cfa_store.offset
1516 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1517 effects: cfa_store.offset += -/+ mode_size(mem)
1518 cfa.offset = cfa_store.offset if cfa.reg == sp
1520 cfa.base_offset = -cfa_store.offset
1523 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1526 effects: cfa.reg = <reg1>
1527 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1530 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1531 effects: cfa.reg = <reg1>
1532 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1535 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1536 effects: cfa.reg = <reg1>
1537 cfa.base_offset = -cfa_temp.offset
1538 cfa_temp.offset -= mode_size(mem)
1541 (set <reg> {unspec, unspec_volatile})
1542 effects: target-dependent
1545 (set sp (and: sp <const_int>))
1546 constraints: cfa_store.reg == sp
1547 effects: cfun->fde.stack_realign = 1
1548 cfa_store.offset = 0
1549 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1552 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1553 effects: cfa_store.offset += -/+ mode_size(mem)
1556 (set (mem ({pre_inc, pre_dec} sp)) fp)
1557 constraints: fde->stack_realign == 1
1558 effects: cfa_store.offset = 0
1559 cfa.reg != HARD_FRAME_POINTER_REGNUM
1562 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1563 constraints: fde->stack_realign == 1
1565 && cfa.indirect == 0
1566 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1567 effects: Use DW_CFA_def_cfa_expression to define cfa
1568 cfa.reg == fde->drap_reg */
1571 dwarf2out_frame_debug_expr (rtx expr
)
1573 rtx src
, dest
, span
;
1577 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1578 the PARALLEL independently. The first element is always processed if
1579 it is a SET. This is for backward compatibility. Other elements
1580 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1581 flag is set in them. */
1582 if (GET_CODE (expr
) == PARALLEL
|| GET_CODE (expr
) == SEQUENCE
)
1585 int limit
= XVECLEN (expr
, 0);
1588 /* PARALLELs have strict read-modify-write semantics, so we
1589 ought to evaluate every rvalue before changing any lvalue.
1590 It's cumbersome to do that in general, but there's an
1591 easy approximation that is enough for all current users:
1592 handle register saves before register assignments. */
1593 if (GET_CODE (expr
) == PARALLEL
)
1594 for (par_index
= 0; par_index
< limit
; par_index
++)
1596 elem
= XVECEXP (expr
, 0, par_index
);
1597 if (GET_CODE (elem
) == SET
1598 && MEM_P (SET_DEST (elem
))
1599 && (RTX_FRAME_RELATED_P (elem
) || par_index
== 0))
1600 dwarf2out_frame_debug_expr (elem
);
1603 for (par_index
= 0; par_index
< limit
; par_index
++)
1605 elem
= XVECEXP (expr
, 0, par_index
);
1606 if (GET_CODE (elem
) == SET
1607 && (!MEM_P (SET_DEST (elem
)) || GET_CODE (expr
) == SEQUENCE
)
1608 && (RTX_FRAME_RELATED_P (elem
) || par_index
== 0))
1609 dwarf2out_frame_debug_expr (elem
);
1614 gcc_assert (GET_CODE (expr
) == SET
);
1616 src
= SET_SRC (expr
);
1617 dest
= SET_DEST (expr
);
1621 rtx rsi
= reg_saved_in (src
);
1628 switch (GET_CODE (dest
))
1631 switch (GET_CODE (src
))
1633 /* Setting FP from SP. */
1635 if (cur_cfa
->reg
== dwf_regno (src
))
1638 /* Update the CFA rule wrt SP or FP. Make sure src is
1639 relative to the current CFA register.
1641 We used to require that dest be either SP or FP, but the
1642 ARM copies SP to a temporary register, and from there to
1643 FP. So we just rely on the backends to only set
1644 RTX_FRAME_RELATED_P on appropriate insns. */
1645 cur_cfa
->reg
= dwf_regno (dest
);
1646 cur_trace
->cfa_temp
.reg
= cur_cfa
->reg
;
1647 cur_trace
->cfa_temp
.offset
= cur_cfa
->offset
;
1651 /* Saving a register in a register. */
1652 gcc_assert (!fixed_regs
[REGNO (dest
)]
1653 /* For the SPARC and its register window. */
1654 || (dwf_regno (src
) == DWARF_FRAME_RETURN_COLUMN
));
1656 /* After stack is aligned, we can only save SP in FP
1657 if drap register is used. In this case, we have
1658 to restore stack pointer with the CFA value and we
1659 don't generate this DWARF information. */
1661 && fde
->stack_realign
1662 && REGNO (src
) == STACK_POINTER_REGNUM
)
1663 gcc_assert (REGNO (dest
) == HARD_FRAME_POINTER_REGNUM
1664 && fde
->drap_reg
!= INVALID_REGNUM
1665 && cur_cfa
->reg
!= dwf_regno (src
));
1667 queue_reg_save (src
, dest
, 0);
1674 if (dest
== stack_pointer_rtx
)
1678 if (REG_P (XEXP (src
, 1)))
1680 gcc_assert (dwf_regno (XEXP (src
, 1))
1681 == cur_trace
->cfa_temp
.reg
);
1682 offset
= cur_trace
->cfa_temp
.offset
;
1684 else if (!poly_int_rtx_p (XEXP (src
, 1), &offset
))
1687 if (XEXP (src
, 0) == hard_frame_pointer_rtx
)
1689 /* Restoring SP from FP in the epilogue. */
1690 gcc_assert (cur_cfa
->reg
== dw_frame_pointer_regnum
);
1691 cur_cfa
->reg
= dw_stack_pointer_regnum
;
1693 else if (GET_CODE (src
) == LO_SUM
)
1694 /* Assume we've set the source reg of the LO_SUM from sp. */
1697 gcc_assert (XEXP (src
, 0) == stack_pointer_rtx
);
1699 if (GET_CODE (src
) != MINUS
)
1701 if (cur_cfa
->reg
== dw_stack_pointer_regnum
)
1702 cur_cfa
->offset
+= offset
;
1703 if (cur_trace
->cfa_store
.reg
== dw_stack_pointer_regnum
)
1704 cur_trace
->cfa_store
.offset
+= offset
;
1706 else if (dest
== hard_frame_pointer_rtx
)
1709 /* Either setting the FP from an offset of the SP,
1710 or adjusting the FP */
1711 gcc_assert (frame_pointer_needed
);
1713 gcc_assert (REG_P (XEXP (src
, 0))
1714 && dwf_regno (XEXP (src
, 0)) == cur_cfa
->reg
);
1715 offset
= rtx_to_poly_int64 (XEXP (src
, 1));
1716 if (GET_CODE (src
) != MINUS
)
1718 cur_cfa
->offset
+= offset
;
1719 cur_cfa
->reg
= dw_frame_pointer_regnum
;
1723 gcc_assert (GET_CODE (src
) != MINUS
);
1726 if (REG_P (XEXP (src
, 0))
1727 && dwf_regno (XEXP (src
, 0)) == cur_cfa
->reg
1728 && poly_int_rtx_p (XEXP (src
, 1), &offset
))
1730 /* Setting a temporary CFA register that will be copied
1731 into the FP later on. */
1733 cur_cfa
->offset
+= offset
;
1734 cur_cfa
->reg
= dwf_regno (dest
);
1735 /* Or used to save regs to the stack. */
1736 cur_trace
->cfa_temp
.reg
= cur_cfa
->reg
;
1737 cur_trace
->cfa_temp
.offset
= cur_cfa
->offset
;
1741 else if (REG_P (XEXP (src
, 0))
1742 && dwf_regno (XEXP (src
, 0)) == cur_trace
->cfa_temp
.reg
1743 && XEXP (src
, 1) == stack_pointer_rtx
)
1745 /* Setting a scratch register that we will use instead
1746 of SP for saving registers to the stack. */
1747 gcc_assert (cur_cfa
->reg
== dw_stack_pointer_regnum
);
1748 cur_trace
->cfa_store
.reg
= dwf_regno (dest
);
1749 cur_trace
->cfa_store
.offset
1750 = cur_cfa
->offset
- cur_trace
->cfa_temp
.offset
;
1754 else if (GET_CODE (src
) == LO_SUM
1755 && poly_int_rtx_p (XEXP (src
, 1),
1756 &cur_trace
->cfa_temp
.offset
))
1757 cur_trace
->cfa_temp
.reg
= dwf_regno (dest
);
1766 cur_trace
->cfa_temp
.reg
= dwf_regno (dest
);
1767 cur_trace
->cfa_temp
.offset
= rtx_to_poly_int64 (src
);
1772 gcc_assert (REG_P (XEXP (src
, 0))
1773 && dwf_regno (XEXP (src
, 0)) == cur_trace
->cfa_temp
.reg
1774 && CONST_INT_P (XEXP (src
, 1)));
1776 cur_trace
->cfa_temp
.reg
= dwf_regno (dest
);
1777 if (!can_ior_p (cur_trace
->cfa_temp
.offset
, INTVAL (XEXP (src
, 1)),
1778 &cur_trace
->cfa_temp
.offset
))
1779 /* The target shouldn't generate this kind of CFI note if we
1780 can't represent it. */
1784 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1785 which will fill in all of the bits. */
1792 case UNSPEC_VOLATILE
:
1793 /* All unspecs should be represented by REG_CFA_* notes. */
1799 /* If this AND operation happens on stack pointer in prologue,
1800 we assume the stack is realigned and we extract the
1802 if (fde
&& XEXP (src
, 0) == stack_pointer_rtx
)
1804 /* We interpret reg_save differently with stack_realign set.
1805 Thus we must flush whatever we have queued first. */
1806 dwarf2out_flush_queued_reg_saves ();
1808 gcc_assert (cur_trace
->cfa_store
.reg
1809 == dwf_regno (XEXP (src
, 0)));
1810 fde
->stack_realign
= 1;
1811 fde
->stack_realignment
= INTVAL (XEXP (src
, 1));
1812 cur_trace
->cfa_store
.offset
= 0;
1814 if (cur_cfa
->reg
!= dw_stack_pointer_regnum
1815 && cur_cfa
->reg
!= dw_frame_pointer_regnum
)
1816 fde
->drap_reg
= cur_cfa
->reg
;
1827 /* Saving a register to the stack. Make sure dest is relative to the
1829 switch (GET_CODE (XEXP (dest
, 0)))
1835 /* We can't handle variable size modifications. */
1836 offset
= -rtx_to_poly_int64 (XEXP (XEXP (XEXP (dest
, 0), 1), 1));
1838 gcc_assert (REGNO (XEXP (XEXP (dest
, 0), 0)) == STACK_POINTER_REGNUM
1839 && cur_trace
->cfa_store
.reg
== dw_stack_pointer_regnum
);
1841 cur_trace
->cfa_store
.offset
+= offset
;
1842 if (cur_cfa
->reg
== dw_stack_pointer_regnum
)
1843 cur_cfa
->offset
= cur_trace
->cfa_store
.offset
;
1845 if (GET_CODE (XEXP (dest
, 0)) == POST_MODIFY
)
1846 offset
-= cur_trace
->cfa_store
.offset
;
1848 offset
= -cur_trace
->cfa_store
.offset
;
1855 offset
= GET_MODE_SIZE (GET_MODE (dest
));
1856 if (GET_CODE (XEXP (dest
, 0)) == PRE_INC
)
1859 gcc_assert ((REGNO (XEXP (XEXP (dest
, 0), 0))
1860 == STACK_POINTER_REGNUM
)
1861 && cur_trace
->cfa_store
.reg
== dw_stack_pointer_regnum
);
1863 cur_trace
->cfa_store
.offset
+= offset
;
1865 /* Rule 18: If stack is aligned, we will use FP as a
1866 reference to represent the address of the stored
1869 && fde
->stack_realign
1871 && REGNO (src
) == HARD_FRAME_POINTER_REGNUM
)
1873 gcc_assert (cur_cfa
->reg
!= dw_frame_pointer_regnum
);
1874 cur_trace
->cfa_store
.offset
= 0;
1877 if (cur_cfa
->reg
== dw_stack_pointer_regnum
)
1878 cur_cfa
->offset
= cur_trace
->cfa_store
.offset
;
1880 if (GET_CODE (XEXP (dest
, 0)) == POST_DEC
)
1881 offset
+= -cur_trace
->cfa_store
.offset
;
1883 offset
= -cur_trace
->cfa_store
.offset
;
1887 /* With an offset. */
1894 gcc_assert (REG_P (XEXP (XEXP (dest
, 0), 0)));
1895 offset
= rtx_to_poly_int64 (XEXP (XEXP (dest
, 0), 1));
1896 if (GET_CODE (XEXP (dest
, 0)) == MINUS
)
1899 regno
= dwf_regno (XEXP (XEXP (dest
, 0), 0));
1901 if (cur_cfa
->reg
== regno
)
1902 offset
-= cur_cfa
->offset
;
1903 else if (cur_trace
->cfa_store
.reg
== regno
)
1904 offset
-= cur_trace
->cfa_store
.offset
;
1907 gcc_assert (cur_trace
->cfa_temp
.reg
== regno
);
1908 offset
-= cur_trace
->cfa_temp
.offset
;
1914 /* Without an offset. */
1917 unsigned int regno
= dwf_regno (XEXP (dest
, 0));
1919 if (cur_cfa
->reg
== regno
)
1920 offset
= -cur_cfa
->offset
;
1921 else if (cur_trace
->cfa_store
.reg
== regno
)
1922 offset
= -cur_trace
->cfa_store
.offset
;
1925 gcc_assert (cur_trace
->cfa_temp
.reg
== regno
);
1926 offset
= -cur_trace
->cfa_temp
.offset
;
1933 gcc_assert (cur_trace
->cfa_temp
.reg
1934 == dwf_regno (XEXP (XEXP (dest
, 0), 0)));
1935 offset
= -cur_trace
->cfa_temp
.offset
;
1936 cur_trace
->cfa_temp
.offset
-= GET_MODE_SIZE (GET_MODE (dest
));
1944 /* If the source operand of this MEM operation is a memory,
1945 we only care how much stack grew. */
1950 && REGNO (src
) != STACK_POINTER_REGNUM
1951 && REGNO (src
) != HARD_FRAME_POINTER_REGNUM
1952 && dwf_regno (src
) == cur_cfa
->reg
)
1954 /* We're storing the current CFA reg into the stack. */
1956 if (known_eq (cur_cfa
->offset
, 0))
1959 /* If stack is aligned, putting CFA reg into stack means
1960 we can no longer use reg + offset to represent CFA.
1961 Here we use DW_CFA_def_cfa_expression instead. The
1962 result of this expression equals to the original CFA
1965 && fde
->stack_realign
1966 && cur_cfa
->indirect
== 0
1967 && cur_cfa
->reg
!= dw_frame_pointer_regnum
)
1969 gcc_assert (fde
->drap_reg
== cur_cfa
->reg
);
1971 cur_cfa
->indirect
= 1;
1972 cur_cfa
->reg
= dw_frame_pointer_regnum
;
1973 cur_cfa
->base_offset
= offset
;
1974 cur_cfa
->offset
= 0;
1976 fde
->drap_reg_saved
= 1;
1980 /* If the source register is exactly the CFA, assume
1981 we're saving SP like any other register; this happens
1983 queue_reg_save (stack_pointer_rtx
, NULL_RTX
, offset
);
1988 /* Otherwise, we'll need to look in the stack to
1989 calculate the CFA. */
1990 rtx x
= XEXP (dest
, 0);
1994 gcc_assert (REG_P (x
));
1996 cur_cfa
->reg
= dwf_regno (x
);
1997 cur_cfa
->base_offset
= offset
;
1998 cur_cfa
->indirect
= 1;
2004 span
= targetm
.dwarf_register_span (src
);
2009 queue_reg_save (src
, NULL_RTX
, offset
);
2012 /* We have a PARALLEL describing where the contents of SRC live.
2013 Queue register saves for each piece of the PARALLEL. */
2014 poly_int64 span_offset
= offset
;
2016 gcc_assert (GET_CODE (span
) == PARALLEL
);
2018 const int par_len
= XVECLEN (span
, 0);
2019 for (int par_index
= 0; par_index
< par_len
; par_index
++)
2021 rtx elem
= XVECEXP (span
, 0, par_index
);
2022 queue_reg_save (elem
, NULL_RTX
, span_offset
);
2023 span_offset
+= GET_MODE_SIZE (GET_MODE (elem
));
2033 /* Record call frame debugging information for INSN, which either sets
2034 SP or FP (adjusting how we calculate the frame address) or saves a
2035 register to the stack. */
2038 dwarf2out_frame_debug (rtx_insn
*insn
)
2041 bool handled_one
= false;
2043 for (note
= REG_NOTES (insn
); note
; note
= XEXP (note
, 1))
2044 switch (REG_NOTE_KIND (note
))
2046 case REG_FRAME_RELATED_EXPR
:
2047 pat
= XEXP (note
, 0);
2050 case REG_CFA_DEF_CFA
:
2051 dwarf2out_frame_debug_def_cfa (XEXP (note
, 0));
2055 case REG_CFA_ADJUST_CFA
:
2060 if (GET_CODE (n
) == PARALLEL
)
2061 n
= XVECEXP (n
, 0, 0);
2063 dwarf2out_frame_debug_adjust_cfa (n
);
2067 case REG_CFA_OFFSET
:
2070 n
= single_set (insn
);
2071 dwarf2out_frame_debug_cfa_offset (n
);
2075 case REG_CFA_REGISTER
:
2080 if (GET_CODE (n
) == PARALLEL
)
2081 n
= XVECEXP (n
, 0, 0);
2083 dwarf2out_frame_debug_cfa_register (n
);
2087 case REG_CFA_EXPRESSION
:
2088 case REG_CFA_VAL_EXPRESSION
:
2091 n
= single_set (insn
);
2093 if (REG_NOTE_KIND (note
) == REG_CFA_EXPRESSION
)
2094 dwarf2out_frame_debug_cfa_expression (n
);
2096 dwarf2out_frame_debug_cfa_val_expression (n
);
2101 case REG_CFA_RESTORE
:
2106 if (GET_CODE (n
) == PARALLEL
)
2107 n
= XVECEXP (n
, 0, 0);
2110 dwarf2out_frame_debug_cfa_restore (n
);
2114 case REG_CFA_SET_VDRAP
:
2118 dw_fde_ref fde
= cfun
->fde
;
2121 gcc_assert (fde
->vdrap_reg
== INVALID_REGNUM
);
2123 fde
->vdrap_reg
= dwf_regno (n
);
2129 case REG_CFA_TOGGLE_RA_MANGLE
:
2130 case REG_CFA_WINDOW_SAVE
:
2131 /* We overload both of these operations onto the same DWARF opcode. */
2132 dwarf2out_frame_debug_cfa_window_save ();
2136 case REG_CFA_FLUSH_QUEUE
:
2137 /* The actual flush happens elsewhere. */
2147 pat
= PATTERN (insn
);
2149 dwarf2out_frame_debug_expr (pat
);
2151 /* Check again. A parallel can save and update the same register.
2152 We could probably check just once, here, but this is safer than
2153 removing the check at the start of the function. */
2154 if (clobbers_queued_reg_save (pat
))
2155 dwarf2out_flush_queued_reg_saves ();
2159 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2162 change_cfi_row (dw_cfi_row
*old_row
, dw_cfi_row
*new_row
)
2164 size_t i
, n_old
, n_new
, n_max
;
2167 if (new_row
->cfa_cfi
&& !cfi_equal_p (old_row
->cfa_cfi
, new_row
->cfa_cfi
))
2168 add_cfi (new_row
->cfa_cfi
);
2171 cfi
= def_cfa_0 (&old_row
->cfa
, &new_row
->cfa
);
2176 n_old
= vec_safe_length (old_row
->reg_save
);
2177 n_new
= vec_safe_length (new_row
->reg_save
);
2178 n_max
= MAX (n_old
, n_new
);
2180 for (i
= 0; i
< n_max
; ++i
)
2182 dw_cfi_ref r_old
= NULL
, r_new
= NULL
;
2185 r_old
= (*old_row
->reg_save
)[i
];
2187 r_new
= (*new_row
->reg_save
)[i
];
2191 else if (r_new
== NULL
)
2192 add_cfi_restore (i
);
2193 else if (!cfi_equal_p (r_old
, r_new
))
2198 /* Examine CFI and return true if a cfi label and set_loc is needed
2199 beforehand. Even when generating CFI assembler instructions, we
2200 still have to add the cfi to the list so that lookup_cfa_1 works
2201 later on. When -g2 and above we even need to force emitting of
2202 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2203 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2204 and so don't use convert_cfa_to_fb_loc_list. */
2207 cfi_label_required_p (dw_cfi_ref cfi
)
2209 if (!dwarf2out_do_cfi_asm ())
2212 if (dwarf_version
== 2
2213 && debug_info_level
> DINFO_LEVEL_TERSE
2214 && (write_symbols
== DWARF2_DEBUG
2215 || write_symbols
== VMS_AND_DWARF2_DEBUG
))
2217 switch (cfi
->dw_cfi_opc
)
2219 case DW_CFA_def_cfa_offset
:
2220 case DW_CFA_def_cfa_offset_sf
:
2221 case DW_CFA_def_cfa_register
:
2222 case DW_CFA_def_cfa
:
2223 case DW_CFA_def_cfa_sf
:
2224 case DW_CFA_def_cfa_expression
:
2225 case DW_CFA_restore_state
:
2234 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2235 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2238 add_cfis_to_fde (void)
2240 dw_fde_ref fde
= cfun
->fde
;
2241 rtx_insn
*insn
, *next
;
2243 for (insn
= get_insns (); insn
; insn
= next
)
2245 next
= NEXT_INSN (insn
);
2247 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_SWITCH_TEXT_SECTIONS
)
2248 fde
->dw_fde_switch_cfi_index
= vec_safe_length (fde
->dw_fde_cfi
);
2250 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_CFI
)
2252 bool required
= cfi_label_required_p (NOTE_CFI (insn
));
2254 if (NOTE_P (next
) && NOTE_KIND (next
) == NOTE_INSN_CFI
)
2256 required
|= cfi_label_required_p (NOTE_CFI (next
));
2257 next
= NEXT_INSN (next
);
2259 else if (active_insn_p (next
)
2260 || (NOTE_P (next
) && (NOTE_KIND (next
)
2261 == NOTE_INSN_SWITCH_TEXT_SECTIONS
)))
2264 next
= NEXT_INSN (next
);
2267 int num
= dwarf2out_cfi_label_num
;
2268 const char *label
= dwarf2out_cfi_label ();
2271 /* Set the location counter to the new label. */
2273 xcfi
->dw_cfi_opc
= DW_CFA_advance_loc4
;
2274 xcfi
->dw_cfi_oprnd1
.dw_cfi_addr
= label
;
2275 vec_safe_push (fde
->dw_fde_cfi
, xcfi
);
2277 rtx_note
*tmp
= emit_note_before (NOTE_INSN_CFI_LABEL
, insn
);
2278 NOTE_LABEL_NUMBER (tmp
) = num
;
2283 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_CFI
)
2284 vec_safe_push (fde
->dw_fde_cfi
, NOTE_CFI (insn
));
2285 insn
= NEXT_INSN (insn
);
2287 while (insn
!= next
);
2292 static void dump_cfi_row (FILE *f
, dw_cfi_row
*row
);
2294 /* If LABEL is the start of a trace, then initialize the state of that
2295 trace from CUR_TRACE and CUR_ROW. */
2298 maybe_record_trace_start (rtx_insn
*start
, rtx_insn
*origin
)
2302 ti
= get_trace_info (start
);
2303 gcc_assert (ti
!= NULL
);
2307 fprintf (dump_file
, " saw edge from trace %u to %u (via %s %d)\n",
2308 cur_trace
->id
, ti
->id
,
2309 (origin
? rtx_name
[(int) GET_CODE (origin
)] : "fallthru"),
2310 (origin
? INSN_UID (origin
) : 0));
2313 poly_int64 args_size
= cur_trace
->end_true_args_size
;
2314 if (ti
->beg_row
== NULL
)
2316 /* This is the first time we've encountered this trace. Propagate
2317 state across the edge and push the trace onto the work list. */
2318 ti
->beg_row
= copy_cfi_row (cur_row
);
2319 ti
->beg_true_args_size
= args_size
;
2321 ti
->cfa_store
= cur_trace
->cfa_store
;
2322 ti
->cfa_temp
= cur_trace
->cfa_temp
;
2323 ti
->regs_saved_in_regs
= cur_trace
->regs_saved_in_regs
.copy ();
2325 trace_work_list
.safe_push (ti
);
2328 fprintf (dump_file
, "\tpush trace %u to worklist\n", ti
->id
);
2333 /* We ought to have the same state incoming to a given trace no
2334 matter how we arrive at the trace. Anything else means we've
2335 got some kind of optimization error. */
2337 if (!cfi_row_equal_p (cur_row
, ti
->beg_row
))
2341 fprintf (dump_file
, "Inconsistent CFI state!\n");
2342 fprintf (dump_file
, "SHOULD have:\n");
2343 dump_cfi_row (dump_file
, ti
->beg_row
);
2344 fprintf (dump_file
, "DO have:\n");
2345 dump_cfi_row (dump_file
, cur_row
);
2352 /* The args_size is allowed to conflict if it isn't actually used. */
2353 if (maybe_ne (ti
->beg_true_args_size
, args_size
))
2354 ti
->args_size_undefined
= true;
2358 /* Similarly, but handle the args_size and CFA reset across EH
2359 and non-local goto edges. */
2362 maybe_record_trace_start_abnormal (rtx_insn
*start
, rtx_insn
*origin
)
2364 poly_int64 save_args_size
, delta
;
2365 dw_cfa_location save_cfa
;
2367 save_args_size
= cur_trace
->end_true_args_size
;
2368 if (known_eq (save_args_size
, 0))
2370 maybe_record_trace_start (start
, origin
);
2374 delta
= -save_args_size
;
2375 cur_trace
->end_true_args_size
= 0;
2377 save_cfa
= cur_row
->cfa
;
2378 if (cur_row
->cfa
.reg
== dw_stack_pointer_regnum
)
2380 /* Convert a change in args_size (always a positive in the
2381 direction of stack growth) to a change in stack pointer. */
2382 if (!STACK_GROWS_DOWNWARD
)
2385 cur_row
->cfa
.offset
+= delta
;
2388 maybe_record_trace_start (start
, origin
);
2390 cur_trace
->end_true_args_size
= save_args_size
;
2391 cur_row
->cfa
= save_cfa
;
2394 /* Propagate CUR_TRACE state to the destinations implied by INSN. */
2395 /* ??? Sadly, this is in large part a duplicate of make_edges. */
2398 create_trace_edges (rtx_insn
*insn
)
2405 rtx_jump_table_data
*table
;
2407 if (find_reg_note (insn
, REG_NON_LOCAL_GOTO
, NULL_RTX
))
2410 if (tablejump_p (insn
, NULL
, &table
))
2412 rtvec vec
= table
->get_labels ();
2414 n
= GET_NUM_ELEM (vec
);
2415 for (i
= 0; i
< n
; ++i
)
2417 rtx_insn
*lab
= as_a
<rtx_insn
*> (XEXP (RTVEC_ELT (vec
, i
), 0));
2418 maybe_record_trace_start (lab
, insn
);
2421 else if (computed_jump_p (insn
))
2425 FOR_EACH_VEC_SAFE_ELT (forced_labels
, i
, temp
)
2426 maybe_record_trace_start (temp
, insn
);
2428 else if (returnjump_p (insn
))
2430 else if ((tmp
= extract_asm_operands (PATTERN (insn
))) != NULL
)
2432 n
= ASM_OPERANDS_LABEL_LENGTH (tmp
);
2433 for (i
= 0; i
< n
; ++i
)
2436 as_a
<rtx_insn
*> (XEXP (ASM_OPERANDS_LABEL (tmp
, i
), 0));
2437 maybe_record_trace_start (lab
, insn
);
2442 rtx_insn
*lab
= JUMP_LABEL_AS_INSN (insn
);
2443 gcc_assert (lab
!= NULL
);
2444 maybe_record_trace_start (lab
, insn
);
2447 else if (CALL_P (insn
))
2449 /* Sibling calls don't have edges inside this function. */
2450 if (SIBLING_CALL_P (insn
))
2453 /* Process non-local goto edges. */
2454 if (can_nonlocal_goto (insn
))
2455 for (rtx_insn_list
*lab
= nonlocal_goto_handler_labels
;
2458 maybe_record_trace_start_abnormal (lab
->insn (), insn
);
2460 else if (rtx_sequence
*seq
= dyn_cast
<rtx_sequence
*> (PATTERN (insn
)))
2462 int i
, n
= seq
->len ();
2463 for (i
= 0; i
< n
; ++i
)
2464 create_trace_edges (seq
->insn (i
));
2468 /* Process EH edges. */
2469 if (CALL_P (insn
) || cfun
->can_throw_non_call_exceptions
)
2471 eh_landing_pad lp
= get_eh_landing_pad_from_rtx (insn
);
2473 maybe_record_trace_start_abnormal (lp
->landing_pad
, insn
);
2477 /* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
2480 scan_insn_after (rtx_insn
*insn
)
2482 if (RTX_FRAME_RELATED_P (insn
))
2483 dwarf2out_frame_debug (insn
);
2484 notice_args_size (insn
);
2487 /* Scan the trace beginning at INSN and create the CFI notes for the
2488 instructions therein. */
2491 scan_trace (dw_trace_info
*trace
, bool entry
)
2493 rtx_insn
*prev
, *insn
= trace
->head
;
2494 dw_cfa_location this_cfa
;
2497 fprintf (dump_file
, "Processing trace %u : start at %s %d\n",
2498 trace
->id
, rtx_name
[(int) GET_CODE (insn
)],
2501 trace
->end_row
= copy_cfi_row (trace
->beg_row
);
2502 trace
->end_true_args_size
= trace
->beg_true_args_size
;
2505 cur_row
= trace
->end_row
;
2507 this_cfa
= cur_row
->cfa
;
2508 cur_cfa
= &this_cfa
;
2510 /* If the current function starts with a non-standard incoming frame
2511 sp offset, emit a note before the first instruction. */
2513 && DEFAULT_INCOMING_FRAME_SP_OFFSET
!= INCOMING_FRAME_SP_OFFSET
)
2515 add_cfi_insn
= insn
;
2516 gcc_assert (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_DELETED
);
2517 this_cfa
.offset
= INCOMING_FRAME_SP_OFFSET
;
2518 def_cfa_1 (&this_cfa
);
2521 for (prev
= insn
, insn
= NEXT_INSN (insn
);
2523 prev
= insn
, insn
= NEXT_INSN (insn
))
2527 /* Do everything that happens "before" the insn. */
2528 add_cfi_insn
= prev
;
2530 /* Notice the end of a trace. */
2531 if (BARRIER_P (insn
))
2533 /* Don't bother saving the unneeded queued registers at all. */
2534 queued_reg_saves
.truncate (0);
2537 if (save_point_p (insn
))
2539 /* Propagate across fallthru edges. */
2540 dwarf2out_flush_queued_reg_saves ();
2541 maybe_record_trace_start (insn
, NULL
);
2545 if (DEBUG_INSN_P (insn
) || !inside_basic_block_p (insn
))
2548 /* Handle all changes to the row state. Sequences require special
2549 handling for the positioning of the notes. */
2550 if (rtx_sequence
*pat
= dyn_cast
<rtx_sequence
*> (PATTERN (insn
)))
2553 int i
, n
= pat
->len ();
2555 control
= pat
->insn (0);
2556 if (can_throw_internal (control
))
2557 notice_eh_throw (control
);
2558 dwarf2out_flush_queued_reg_saves ();
2560 if (JUMP_P (control
) && INSN_ANNULLED_BRANCH_P (control
))
2562 /* ??? Hopefully multiple delay slots are not annulled. */
2563 gcc_assert (n
== 2);
2564 gcc_assert (!RTX_FRAME_RELATED_P (control
));
2565 gcc_assert (!find_reg_note (control
, REG_ARGS_SIZE
, NULL
));
2567 elt
= pat
->insn (1);
2569 if (INSN_FROM_TARGET_P (elt
))
2571 cfi_vec save_row_reg_save
;
2573 /* If ELT is an instruction from target of an annulled
2574 branch, the effects are for the target only and so
2575 the args_size and CFA along the current path
2576 shouldn't change. */
2577 add_cfi_insn
= NULL
;
2578 poly_int64 restore_args_size
= cur_trace
->end_true_args_size
;
2579 cur_cfa
= &cur_row
->cfa
;
2580 save_row_reg_save
= vec_safe_copy (cur_row
->reg_save
);
2582 scan_insn_after (elt
);
2584 /* ??? Should we instead save the entire row state? */
2585 gcc_assert (!queued_reg_saves
.length ());
2587 create_trace_edges (control
);
2589 cur_trace
->end_true_args_size
= restore_args_size
;
2590 cur_row
->cfa
= this_cfa
;
2591 cur_row
->reg_save
= save_row_reg_save
;
2592 cur_cfa
= &this_cfa
;
2596 /* If ELT is a annulled branch-taken instruction (i.e.
2597 executed only when branch is not taken), the args_size
2598 and CFA should not change through the jump. */
2599 create_trace_edges (control
);
2601 /* Update and continue with the trace. */
2602 add_cfi_insn
= insn
;
2603 scan_insn_after (elt
);
2604 def_cfa_1 (&this_cfa
);
2609 /* The insns in the delay slot should all be considered to happen
2610 "before" a call insn. Consider a call with a stack pointer
2611 adjustment in the delay slot. The backtrace from the callee
2612 should include the sp adjustment. Unfortunately, that leaves
2613 us with an unavoidable unwinding error exactly at the call insn
2614 itself. For jump insns we'd prefer to avoid this error by
2615 placing the notes after the sequence. */
2616 if (JUMP_P (control
))
2617 add_cfi_insn
= insn
;
2619 for (i
= 1; i
< n
; ++i
)
2621 elt
= pat
->insn (i
);
2622 scan_insn_after (elt
);
2625 /* Make sure any register saves are visible at the jump target. */
2626 dwarf2out_flush_queued_reg_saves ();
2627 any_cfis_emitted
= false;
2629 /* However, if there is some adjustment on the call itself, e.g.
2630 a call_pop, that action should be considered to happen after
2631 the call returns. */
2632 add_cfi_insn
= insn
;
2633 scan_insn_after (control
);
2637 /* Flush data before calls and jumps, and of course if necessary. */
2638 if (can_throw_internal (insn
))
2640 notice_eh_throw (insn
);
2641 dwarf2out_flush_queued_reg_saves ();
2643 else if (!NONJUMP_INSN_P (insn
)
2644 || clobbers_queued_reg_save (insn
)
2645 || find_reg_note (insn
, REG_CFA_FLUSH_QUEUE
, NULL
))
2646 dwarf2out_flush_queued_reg_saves ();
2647 any_cfis_emitted
= false;
2649 add_cfi_insn
= insn
;
2650 scan_insn_after (insn
);
2654 /* Between frame-related-p and args_size we might have otherwise
2655 emitted two cfa adjustments. Do it now. */
2656 def_cfa_1 (&this_cfa
);
2658 /* Minimize the number of advances by emitting the entire queue
2659 once anything is emitted. */
2660 if (any_cfis_emitted
2661 || find_reg_note (insn
, REG_CFA_FLUSH_QUEUE
, NULL
))
2662 dwarf2out_flush_queued_reg_saves ();
2664 /* Note that a test for control_flow_insn_p does exactly the
2665 same tests as are done to actually create the edges. So
2666 always call the routine and let it not create edges for
2667 non-control-flow insns. */
2668 create_trace_edges (control
);
2671 add_cfi_insn
= NULL
;
2677 /* Scan the function and create the initial set of CFI notes. */
2680 create_cfi_notes (void)
2684 gcc_checking_assert (!queued_reg_saves
.exists ());
2685 gcc_checking_assert (!trace_work_list
.exists ());
2687 /* Always begin at the entry trace. */
2688 ti
= &trace_info
[0];
2689 scan_trace (ti
, true);
2691 while (!trace_work_list
.is_empty ())
2693 ti
= trace_work_list
.pop ();
2694 scan_trace (ti
, false);
2697 queued_reg_saves
.release ();
2698 trace_work_list
.release ();
2701 /* Return the insn before the first NOTE_INSN_CFI after START. */
2704 before_next_cfi_note (rtx_insn
*start
)
2706 rtx_insn
*prev
= start
;
2709 if (NOTE_P (start
) && NOTE_KIND (start
) == NOTE_INSN_CFI
)
2712 start
= NEXT_INSN (start
);
2717 /* Insert CFI notes between traces to properly change state between them. */
2720 connect_traces (void)
2723 dw_trace_info
*prev_ti
, *ti
;
2725 /* ??? Ideally, we should have both queued and processed every trace.
2726 However the current representation of constant pools on various targets
2727 is indistinguishable from unreachable code. Assume for the moment that
2728 we can simply skip over such traces. */
2729 /* ??? Consider creating a DATA_INSN rtx code to indicate that
2730 these are not "real" instructions, and should not be considered.
2731 This could be generically useful for tablejump data as well. */
2732 /* Remove all unprocessed traces from the list. */
2734 VEC_ORDERED_REMOVE_IF_FROM_TO (trace_info
, ix
, ix2
, ti
, 1,
2735 trace_info
.length (), ti
->beg_row
== NULL
);
2736 FOR_EACH_VEC_ELT (trace_info
, ix
, ti
)
2737 gcc_assert (ti
->end_row
!= NULL
);
2739 /* Work from the end back to the beginning. This lets us easily insert
2740 remember/restore_state notes in the correct order wrt other notes. */
2741 n
= trace_info
.length ();
2742 prev_ti
= &trace_info
[n
- 1];
2743 for (i
= n
- 1; i
> 0; --i
)
2745 dw_cfi_row
*old_row
;
2748 prev_ti
= &trace_info
[i
- 1];
2750 add_cfi_insn
= ti
->head
;
2752 /* In dwarf2out_switch_text_section, we'll begin a new FDE
2753 for the portion of the function in the alternate text
2754 section. The row state at the very beginning of that
2755 new FDE will be exactly the row state from the CIE. */
2756 if (ti
->switch_sections
)
2757 old_row
= cie_cfi_row
;
2760 old_row
= prev_ti
->end_row
;
2761 /* If there's no change from the previous end state, fine. */
2762 if (cfi_row_equal_p (old_row
, ti
->beg_row
))
2764 /* Otherwise check for the common case of sharing state with
2765 the beginning of an epilogue, but not the end. Insert
2766 remember/restore opcodes in that case. */
2767 else if (cfi_row_equal_p (prev_ti
->beg_row
, ti
->beg_row
))
2771 /* Note that if we blindly insert the remember at the
2772 start of the trace, we can wind up increasing the
2773 size of the unwind info due to extra advance opcodes.
2774 Instead, put the remember immediately before the next
2775 state change. We know there must be one, because the
2776 state at the beginning and head of the trace differ. */
2777 add_cfi_insn
= before_next_cfi_note (prev_ti
->head
);
2779 cfi
->dw_cfi_opc
= DW_CFA_remember_state
;
2782 add_cfi_insn
= ti
->head
;
2784 cfi
->dw_cfi_opc
= DW_CFA_restore_state
;
2787 old_row
= prev_ti
->beg_row
;
2789 /* Otherwise, we'll simply change state from the previous end. */
2792 change_cfi_row (old_row
, ti
->beg_row
);
2794 if (dump_file
&& add_cfi_insn
!= ti
->head
)
2798 fprintf (dump_file
, "Fixup between trace %u and %u:\n",
2799 prev_ti
->id
, ti
->id
);
2804 note
= NEXT_INSN (note
);
2805 gcc_assert (NOTE_P (note
) && NOTE_KIND (note
) == NOTE_INSN_CFI
);
2806 output_cfi_directive (dump_file
, NOTE_CFI (note
));
2808 while (note
!= add_cfi_insn
);
2812 /* Connect args_size between traces that have can_throw_internal insns. */
2813 if (cfun
->eh
->lp_array
)
2815 poly_int64 prev_args_size
= 0;
2817 for (i
= 0; i
< n
; ++i
)
2819 ti
= &trace_info
[i
];
2821 if (ti
->switch_sections
)
2823 if (ti
->eh_head
== NULL
)
2825 gcc_assert (!ti
->args_size_undefined
);
2827 if (maybe_ne (ti
->beg_delay_args_size
, prev_args_size
))
2829 /* ??? Search back to previous CFI note. */
2830 add_cfi_insn
= PREV_INSN (ti
->eh_head
);
2831 add_cfi_args_size (ti
->beg_delay_args_size
);
2834 prev_args_size
= ti
->end_delay_args_size
;
2839 /* Set up the pseudo-cfg of instruction traces, as described at the
2840 block comment at the top of the file. */
2843 create_pseudo_cfg (void)
2845 bool saw_barrier
, switch_sections
;
2850 /* The first trace begins at the start of the function,
2851 and begins with the CIE row state. */
2852 trace_info
.create (16);
2853 memset (&ti
, 0, sizeof (ti
));
2854 ti
.head
= get_insns ();
2855 ti
.beg_row
= cie_cfi_row
;
2856 ti
.cfa_store
= cie_cfi_row
->cfa
;
2857 ti
.cfa_temp
.reg
= INVALID_REGNUM
;
2858 trace_info
.quick_push (ti
);
2860 if (cie_return_save
)
2861 ti
.regs_saved_in_regs
.safe_push (*cie_return_save
);
2863 /* Walk all the insns, collecting start of trace locations. */
2864 saw_barrier
= false;
2865 switch_sections
= false;
2866 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
2868 if (BARRIER_P (insn
))
2870 else if (NOTE_P (insn
)
2871 && NOTE_KIND (insn
) == NOTE_INSN_SWITCH_TEXT_SECTIONS
)
2873 /* We should have just seen a barrier. */
2874 gcc_assert (saw_barrier
);
2875 switch_sections
= true;
2877 /* Watch out for save_point notes between basic blocks.
2878 In particular, a note after a barrier. Do not record these,
2879 delaying trace creation until the label. */
2880 else if (save_point_p (insn
)
2881 && (LABEL_P (insn
) || !saw_barrier
))
2883 memset (&ti
, 0, sizeof (ti
));
2885 ti
.switch_sections
= switch_sections
;
2886 ti
.id
= trace_info
.length ();
2887 trace_info
.safe_push (ti
);
2889 saw_barrier
= false;
2890 switch_sections
= false;
2894 /* Create the trace index after we've finished building trace_info,
2895 avoiding stale pointer problems due to reallocation. */
2897 = new hash_table
<trace_info_hasher
> (trace_info
.length ());
2899 FOR_EACH_VEC_ELT (trace_info
, i
, tp
)
2901 dw_trace_info
**slot
;
2904 fprintf (dump_file
, "Creating trace %u : start at %s %d%s\n", tp
->id
,
2905 rtx_name
[(int) GET_CODE (tp
->head
)], INSN_UID (tp
->head
),
2906 tp
->switch_sections
? " (section switch)" : "");
2908 slot
= trace_index
->find_slot_with_hash (tp
, INSN_UID (tp
->head
), INSERT
);
2909 gcc_assert (*slot
== NULL
);
2914 /* Record the initial position of the return address. RTL is
2915 INCOMING_RETURN_ADDR_RTX. */
2918 initial_return_save (rtx rtl
)
2920 unsigned int reg
= INVALID_REGNUM
;
2921 poly_int64 offset
= 0;
2923 switch (GET_CODE (rtl
))
2926 /* RA is in a register. */
2927 reg
= dwf_regno (rtl
);
2931 /* RA is on the stack. */
2932 rtl
= XEXP (rtl
, 0);
2933 switch (GET_CODE (rtl
))
2936 gcc_assert (REGNO (rtl
) == STACK_POINTER_REGNUM
);
2941 gcc_assert (REGNO (XEXP (rtl
, 0)) == STACK_POINTER_REGNUM
);
2942 offset
= rtx_to_poly_int64 (XEXP (rtl
, 1));
2946 gcc_assert (REGNO (XEXP (rtl
, 0)) == STACK_POINTER_REGNUM
);
2947 offset
= -rtx_to_poly_int64 (XEXP (rtl
, 1));
2957 /* The return address is at some offset from any value we can
2958 actually load. For instance, on the SPARC it is in %i7+8. Just
2959 ignore the offset for now; it doesn't matter for unwinding frames. */
2960 gcc_assert (CONST_INT_P (XEXP (rtl
, 1)));
2961 initial_return_save (XEXP (rtl
, 0));
2968 if (reg
!= DWARF_FRAME_RETURN_COLUMN
)
2970 if (reg
!= INVALID_REGNUM
)
2971 record_reg_saved_in_reg (rtl
, pc_rtx
);
2972 reg_save (DWARF_FRAME_RETURN_COLUMN
, reg
, offset
- cur_row
->cfa
.offset
);
2977 create_cie_data (void)
2979 dw_cfa_location loc
;
2980 dw_trace_info cie_trace
;
2982 dw_stack_pointer_regnum
= DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM
);
2984 memset (&cie_trace
, 0, sizeof (cie_trace
));
2985 cur_trace
= &cie_trace
;
2987 add_cfi_vec
= &cie_cfi_vec
;
2988 cie_cfi_row
= cur_row
= new_cfi_row ();
2990 /* On entry, the Canonical Frame Address is at SP. */
2991 memset (&loc
, 0, sizeof (loc
));
2992 loc
.reg
= dw_stack_pointer_regnum
;
2993 /* create_cie_data is called just once per TU, and when using .cfi_startproc
2994 is even done by the assembler rather than the compiler. If the target
2995 has different incoming frame sp offsets depending on what kind of
2996 function it is, use a single constant offset for the target and
2997 if needed, adjust before the first instruction in insn stream. */
2998 loc
.offset
= DEFAULT_INCOMING_FRAME_SP_OFFSET
;
3001 if (targetm
.debug_unwind_info () == UI_DWARF2
3002 || targetm_common
.except_unwind_info (&global_options
) == UI_DWARF2
)
3004 initial_return_save (INCOMING_RETURN_ADDR_RTX
);
3006 /* For a few targets, we have the return address incoming into a
3007 register, but choose a different return column. This will result
3008 in a DW_CFA_register for the return, and an entry in
3009 regs_saved_in_regs to match. If the target later stores that
3010 return address register to the stack, we want to be able to emit
3011 the DW_CFA_offset against the return column, not the intermediate
3012 save register. Save the contents of regs_saved_in_regs so that
3013 we can re-initialize it at the start of each function. */
3014 switch (cie_trace
.regs_saved_in_regs
.length ())
3019 cie_return_save
= ggc_alloc
<reg_saved_in_data
> ();
3020 *cie_return_save
= cie_trace
.regs_saved_in_regs
[0];
3021 cie_trace
.regs_saved_in_regs
.release ();
3033 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
3034 state at each location within the function. These notes will be
3035 emitted during pass_final. */
3038 execute_dwarf2_frame (void)
3040 /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file. */
3041 dw_frame_pointer_regnum
= DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM
);
3043 /* The first time we're called, compute the incoming frame state. */
3044 if (cie_cfi_vec
== NULL
)
3047 dwarf2out_alloc_current_fde ();
3049 create_pseudo_cfg ();
3052 create_cfi_notes ();
3056 /* Free all the data we allocated. */
3061 FOR_EACH_VEC_ELT (trace_info
, i
, ti
)
3062 ti
->regs_saved_in_regs
.release ();
3064 trace_info
.release ();
3072 /* Convert a DWARF call frame info. operation to its string name */
3075 dwarf_cfi_name (unsigned int cfi_opc
)
3077 const char *name
= get_DW_CFA_name (cfi_opc
);
3082 return "DW_CFA_<unknown>";
3085 /* This routine will generate the correct assembly data for a location
3086 description based on a cfi entry with a complex address. */
3089 output_cfa_loc (dw_cfi_ref cfi
, int for_eh
)
3091 dw_loc_descr_ref loc
;
3094 if (cfi
->dw_cfi_opc
== DW_CFA_expression
3095 || cfi
->dw_cfi_opc
== DW_CFA_val_expression
)
3098 DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3099 dw2_asm_output_data (1, r
, NULL
);
3100 loc
= cfi
->dw_cfi_oprnd2
.dw_cfi_loc
;
3103 loc
= cfi
->dw_cfi_oprnd1
.dw_cfi_loc
;
3105 /* Output the size of the block. */
3106 size
= size_of_locs (loc
);
3107 dw2_asm_output_data_uleb128 (size
, NULL
);
3109 /* Now output the operations themselves. */
3110 output_loc_sequence (loc
, for_eh
);
3113 /* Similar, but used for .cfi_escape. */
3116 output_cfa_loc_raw (dw_cfi_ref cfi
)
3118 dw_loc_descr_ref loc
;
3121 if (cfi
->dw_cfi_opc
== DW_CFA_expression
3122 || cfi
->dw_cfi_opc
== DW_CFA_val_expression
)
3125 DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
3126 fprintf (asm_out_file
, "%#x,", r
);
3127 loc
= cfi
->dw_cfi_oprnd2
.dw_cfi_loc
;
3130 loc
= cfi
->dw_cfi_oprnd1
.dw_cfi_loc
;
3132 /* Output the size of the block. */
3133 size
= size_of_locs (loc
);
3134 dw2_asm_output_data_uleb128_raw (size
);
3135 fputc (',', asm_out_file
);
3137 /* Now output the operations themselves. */
3138 output_loc_sequence_raw (loc
);
3141 /* Output a Call Frame Information opcode and its operand(s). */
3144 output_cfi (dw_cfi_ref cfi
, dw_fde_ref fde
, int for_eh
)
3149 if (cfi
->dw_cfi_opc
== DW_CFA_advance_loc
)
3150 dw2_asm_output_data (1, (cfi
->dw_cfi_opc
3151 | (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
& 0x3f)),
3152 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX
,
3153 ((unsigned HOST_WIDE_INT
)
3154 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
));
3155 else if (cfi
->dw_cfi_opc
== DW_CFA_offset
)
3157 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3158 dw2_asm_output_data (1, (cfi
->dw_cfi_opc
| (r
& 0x3f)),
3159 "DW_CFA_offset, column %#lx", r
);
3160 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
3161 dw2_asm_output_data_uleb128 (off
, NULL
);
3163 else if (cfi
->dw_cfi_opc
== DW_CFA_restore
)
3165 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3166 dw2_asm_output_data (1, (cfi
->dw_cfi_opc
| (r
& 0x3f)),
3167 "DW_CFA_restore, column %#lx", r
);
3171 dw2_asm_output_data (1, cfi
->dw_cfi_opc
,
3172 "%s", dwarf_cfi_name (cfi
->dw_cfi_opc
));
3174 switch (cfi
->dw_cfi_opc
)
3176 case DW_CFA_set_loc
:
3178 dw2_asm_output_encoded_addr_rtx (
3179 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
3180 gen_rtx_SYMBOL_REF (Pmode
, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
),
3183 dw2_asm_output_addr (DWARF2_ADDR_SIZE
,
3184 cfi
->dw_cfi_oprnd1
.dw_cfi_addr
, NULL
);
3185 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
3188 case DW_CFA_advance_loc1
:
3189 dw2_asm_output_delta (1, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
3190 fde
->dw_fde_current_label
, NULL
);
3191 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
3194 case DW_CFA_advance_loc2
:
3195 dw2_asm_output_delta (2, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
3196 fde
->dw_fde_current_label
, NULL
);
3197 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
3200 case DW_CFA_advance_loc4
:
3201 dw2_asm_output_delta (4, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
3202 fde
->dw_fde_current_label
, NULL
);
3203 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
3206 case DW_CFA_MIPS_advance_loc8
:
3207 dw2_asm_output_delta (8, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
3208 fde
->dw_fde_current_label
, NULL
);
3209 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
3212 case DW_CFA_offset_extended
:
3213 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3214 dw2_asm_output_data_uleb128 (r
, NULL
);
3215 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
3216 dw2_asm_output_data_uleb128 (off
, NULL
);
3219 case DW_CFA_def_cfa
:
3220 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3221 dw2_asm_output_data_uleb128 (r
, NULL
);
3222 dw2_asm_output_data_uleb128 (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
, NULL
);
3225 case DW_CFA_offset_extended_sf
:
3226 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3227 dw2_asm_output_data_uleb128 (r
, NULL
);
3228 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
3229 dw2_asm_output_data_sleb128 (off
, NULL
);
3232 case DW_CFA_def_cfa_sf
:
3233 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3234 dw2_asm_output_data_uleb128 (r
, NULL
);
3235 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
3236 dw2_asm_output_data_sleb128 (off
, NULL
);
3239 case DW_CFA_restore_extended
:
3240 case DW_CFA_undefined
:
3241 case DW_CFA_same_value
:
3242 case DW_CFA_def_cfa_register
:
3243 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3244 dw2_asm_output_data_uleb128 (r
, NULL
);
3247 case DW_CFA_register
:
3248 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3249 dw2_asm_output_data_uleb128 (r
, NULL
);
3250 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
, for_eh
);
3251 dw2_asm_output_data_uleb128 (r
, NULL
);
3254 case DW_CFA_def_cfa_offset
:
3255 case DW_CFA_GNU_args_size
:
3256 dw2_asm_output_data_uleb128 (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
, NULL
);
3259 case DW_CFA_def_cfa_offset_sf
:
3260 off
= div_data_align (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
3261 dw2_asm_output_data_sleb128 (off
, NULL
);
3264 case DW_CFA_GNU_window_save
:
3267 case DW_CFA_def_cfa_expression
:
3268 case DW_CFA_expression
:
3269 case DW_CFA_val_expression
:
3270 output_cfa_loc (cfi
, for_eh
);
3273 case DW_CFA_GNU_negative_offset_extended
:
3274 /* Obsoleted by DW_CFA_offset_extended_sf. */
3283 /* Similar, but do it via assembler directives instead. */
3286 output_cfi_directive (FILE *f
, dw_cfi_ref cfi
)
3288 unsigned long r
, r2
;
3290 switch (cfi
->dw_cfi_opc
)
3292 case DW_CFA_advance_loc
:
3293 case DW_CFA_advance_loc1
:
3294 case DW_CFA_advance_loc2
:
3295 case DW_CFA_advance_loc4
:
3296 case DW_CFA_MIPS_advance_loc8
:
3297 case DW_CFA_set_loc
:
3298 /* Should only be created in a code path not followed when emitting
3299 via directives. The assembler is going to take care of this for
3300 us. But this routines is also used for debugging dumps, so
3302 gcc_assert (f
!= asm_out_file
);
3303 fprintf (f
, "\t.cfi_advance_loc\n");
3307 case DW_CFA_offset_extended
:
3308 case DW_CFA_offset_extended_sf
:
3309 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
3310 fprintf (f
, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC
"\n",
3311 r
, cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
3314 case DW_CFA_restore
:
3315 case DW_CFA_restore_extended
:
3316 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
3317 fprintf (f
, "\t.cfi_restore %lu\n", r
);
3320 case DW_CFA_undefined
:
3321 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
3322 fprintf (f
, "\t.cfi_undefined %lu\n", r
);
3325 case DW_CFA_same_value
:
3326 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
3327 fprintf (f
, "\t.cfi_same_value %lu\n", r
);
3330 case DW_CFA_def_cfa
:
3331 case DW_CFA_def_cfa_sf
:
3332 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
3333 fprintf (f
, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC
"\n",
3334 r
, cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
3337 case DW_CFA_def_cfa_register
:
3338 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
3339 fprintf (f
, "\t.cfi_def_cfa_register %lu\n", r
);
3342 case DW_CFA_register
:
3343 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
3344 r2
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
, 1);
3345 fprintf (f
, "\t.cfi_register %lu, %lu\n", r
, r2
);
3348 case DW_CFA_def_cfa_offset
:
3349 case DW_CFA_def_cfa_offset_sf
:
3350 fprintf (f
, "\t.cfi_def_cfa_offset "
3351 HOST_WIDE_INT_PRINT_DEC
"\n",
3352 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
3355 case DW_CFA_remember_state
:
3356 fprintf (f
, "\t.cfi_remember_state\n");
3358 case DW_CFA_restore_state
:
3359 fprintf (f
, "\t.cfi_restore_state\n");
3362 case DW_CFA_GNU_args_size
:
3363 if (f
== asm_out_file
)
3365 fprintf (f
, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size
);
3366 dw2_asm_output_data_uleb128_raw (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
3368 fprintf (f
, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC
,
3369 ASM_COMMENT_START
, cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
3374 fprintf (f
, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC
"\n",
3375 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
3379 case DW_CFA_GNU_window_save
:
3380 fprintf (f
, "\t.cfi_window_save\n");
3383 case DW_CFA_def_cfa_expression
:
3384 case DW_CFA_expression
:
3385 case DW_CFA_val_expression
:
3386 if (f
!= asm_out_file
)
3388 fprintf (f
, "\t.cfi_%scfa_%sexpression ...\n",
3389 cfi
->dw_cfi_opc
== DW_CFA_def_cfa_expression
? "def_" : "",
3390 cfi
->dw_cfi_opc
== DW_CFA_val_expression
? "val_" : "");
3393 fprintf (f
, "\t.cfi_escape %#x,", cfi
->dw_cfi_opc
);
3394 output_cfa_loc_raw (cfi
);
3404 dwarf2out_emit_cfi (dw_cfi_ref cfi
)
3406 if (dwarf2out_do_cfi_asm ())
3407 output_cfi_directive (asm_out_file
, cfi
);
3411 dump_cfi_row (FILE *f
, dw_cfi_row
*row
)
3419 dw_cfa_location dummy
;
3420 memset (&dummy
, 0, sizeof (dummy
));
3421 dummy
.reg
= INVALID_REGNUM
;
3422 cfi
= def_cfa_0 (&dummy
, &row
->cfa
);
3424 output_cfi_directive (f
, cfi
);
3426 FOR_EACH_VEC_SAFE_ELT (row
->reg_save
, i
, cfi
)
3428 output_cfi_directive (f
, cfi
);
3431 void debug_cfi_row (dw_cfi_row
*row
);
3434 debug_cfi_row (dw_cfi_row
*row
)
3436 dump_cfi_row (stderr
, row
);
3440 /* Save the result of dwarf2out_do_frame across PCH.
3441 This variable is tri-state, with 0 unset, >0 true, <0 false. */
3442 static GTY(()) signed char saved_do_cfi_asm
= 0;
3444 /* Decide whether to emit EH frame unwind information for the current
3445 translation unit. */
3448 dwarf2out_do_eh_frame (void)
3451 (flag_unwind_tables
|| flag_exceptions
)
3452 && targetm_common
.except_unwind_info (&global_options
) == UI_DWARF2
;
3455 /* Decide whether we want to emit frame unwind information for the current
3456 translation unit. */
3459 dwarf2out_do_frame (void)
3461 /* We want to emit correct CFA location expressions or lists, so we
3462 have to return true if we're going to output debug info, even if
3463 we're not going to output frame or unwind info. */
3464 if (write_symbols
== DWARF2_DEBUG
|| write_symbols
== VMS_AND_DWARF2_DEBUG
)
3467 if (saved_do_cfi_asm
> 0)
3470 if (targetm
.debug_unwind_info () == UI_DWARF2
)
3473 if (dwarf2out_do_eh_frame ())
3479 /* Decide whether to emit frame unwind via assembler directives. */
3482 dwarf2out_do_cfi_asm (void)
3486 if (saved_do_cfi_asm
!= 0)
3487 return saved_do_cfi_asm
> 0;
3489 /* Assume failure for a moment. */
3490 saved_do_cfi_asm
= -1;
3492 if (!flag_dwarf2_cfi_asm
|| !dwarf2out_do_frame ())
3494 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE
)
3497 /* Make sure the personality encoding is one the assembler can support.
3498 In particular, aligned addresses can't be handled. */
3499 enc
= ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3500 if ((enc
& 0x70) != 0 && (enc
& 0x70) != DW_EH_PE_pcrel
)
3502 enc
= ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3503 if ((enc
& 0x70) != 0 && (enc
& 0x70) != DW_EH_PE_pcrel
)
3506 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3507 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
3508 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
&& !dwarf2out_do_eh_frame ())
3512 saved_do_cfi_asm
= 1;
3518 const pass_data pass_data_dwarf2_frame
=
3520 RTL_PASS
, /* type */
3521 "dwarf2", /* name */
3522 OPTGROUP_NONE
, /* optinfo_flags */
3523 TV_FINAL
, /* tv_id */
3524 0, /* properties_required */
3525 0, /* properties_provided */
3526 0, /* properties_destroyed */
3527 0, /* todo_flags_start */
3528 0, /* todo_flags_finish */
3531 class pass_dwarf2_frame
: public rtl_opt_pass
3534 pass_dwarf2_frame (gcc::context
*ctxt
)
3535 : rtl_opt_pass (pass_data_dwarf2_frame
, ctxt
)
3538 /* opt_pass methods: */
3539 virtual bool gate (function
*);
3540 virtual unsigned int execute (function
*) { return execute_dwarf2_frame (); }
3542 }; // class pass_dwarf2_frame
3545 pass_dwarf2_frame::gate (function
*)
3547 /* Targets which still implement the prologue in assembler text
3548 cannot use the generic dwarf2 unwinding. */
3549 if (!targetm
.have_prologue ())
3552 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3553 from the optimized shrink-wrapping annotations that we will compute.
3554 For now, only produce the CFI notes for dwarf2. */
3555 return dwarf2out_do_frame ();
3561 make_pass_dwarf2_frame (gcc::context
*ctxt
)
3563 return new pass_dwarf2_frame (ctxt
);
3566 #include "gt-dwarf2cfi.h"