1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2018 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
22 #include "coretypes.h"
27 #include "tree-pass.h"
31 #include "stor-layout.h"
33 #include "dwarf2out.h"
34 #include "dwarf2asm.h"
35 #include "common/common-target.h"
37 #include "except.h" /* expand_builtin_dwarf_sp_column */
38 #include "profile-count.h" /* For expr.h */
39 #include "expr.h" /* init_return_column_size */
40 #include "output.h" /* asm_out_file */
41 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
44 /* ??? Poison these here until it can be done generically. They've been
45 totally replaced in this file; make sure it stays that way. */
46 #undef DWARF2_UNWIND_INFO
47 #undef DWARF2_FRAME_INFO
48 #if (GCC_VERSION >= 3000)
49 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
52 #ifndef INCOMING_RETURN_ADDR_RTX
53 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
56 #ifndef DEFAULT_INCOMING_FRAME_SP_OFFSET
57 #define DEFAULT_INCOMING_FRAME_SP_OFFSET INCOMING_FRAME_SP_OFFSET
60 /* A collected description of an entire row of the abstract CFI table. */
61 struct GTY(()) dw_cfi_row
63 /* The expression that computes the CFA, expressed in two different ways.
64 The CFA member for the simple cases, and the full CFI expression for
65 the complex cases. The later will be a DW_CFA_cfa_expression. */
69 /* The expressions for any register column that is saved. */
73 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
74 struct GTY(()) reg_saved_in_data
{
80 /* Since we no longer have a proper CFG, we're going to create a facsimile
81 of one on the fly while processing the frame-related insns.
83 We create dw_trace_info structures for each extended basic block beginning
84 and ending at a "save point". Save points are labels, barriers, certain
85 notes, and of course the beginning and end of the function.
87 As we encounter control transfer insns, we propagate the "current"
88 row state across the edges to the starts of traces. When checking is
89 enabled, we validate that we propagate the same data from all sources.
91 All traces are members of the TRACE_INFO array, in the order in which
92 they appear in the instruction stream.
94 All save points are present in the TRACE_INDEX hash, mapping the insn
95 starting a trace to the dw_trace_info describing the trace. */
99 /* The insn that begins the trace. */
102 /* The row state at the beginning and end of the trace. */
103 dw_cfi_row
*beg_row
, *end_row
;
105 /* Tracking for DW_CFA_GNU_args_size. The "true" sizes are those we find
106 while scanning insns. However, the args_size value is irrelevant at
107 any point except can_throw_internal_p insns. Therefore the "delay"
108 sizes the values that must actually be emitted for this trace. */
109 poly_int64_pod beg_true_args_size
, end_true_args_size
;
110 poly_int64_pod beg_delay_args_size
, end_delay_args_size
;
112 /* The first EH insn in the trace, where beg_delay_args_size must be set. */
115 /* The following variables contain data used in interpreting frame related
116 expressions. These are not part of the "real" row state as defined by
117 Dwarf, but it seems like they need to be propagated into a trace in case
118 frame related expressions have been sunk. */
119 /* ??? This seems fragile. These variables are fragments of a larger
120 expression. If we do not keep the entire expression together, we risk
121 not being able to put it together properly. Consider forcing targets
122 to generate self-contained expressions and dropping all of the magic
123 interpretation code in this file. Or at least refusing to shrink wrap
124 any frame related insn that doesn't contain a complete expression. */
126 /* The register used for saving registers to the stack, and its offset
128 dw_cfa_location cfa_store
;
130 /* A temporary register holding an integral value used in adjusting SP
131 or setting up the store_reg. The "offset" field holds the integer
132 value, not an offset. */
133 dw_cfa_location cfa_temp
;
135 /* A set of registers saved in other registers. This is the inverse of
136 the row->reg_save info, if the entry is a DW_CFA_register. This is
137 implemented as a flat array because it normally contains zero or 1
138 entry, depending on the target. IA-64 is the big spender here, using
139 a maximum of 5 entries. */
140 vec
<reg_saved_in_data
> regs_saved_in_regs
;
142 /* An identifier for this trace. Used only for debugging dumps. */
145 /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS. */
146 bool switch_sections
;
148 /* True if we've seen different values incoming to beg_true_args_size. */
149 bool args_size_undefined
;
151 /* True if we've seen an insn with a REG_ARGS_SIZE note before EH_HEAD. */
152 bool args_size_defined_for_eh
;
156 /* Hashtable helpers. */
158 struct trace_info_hasher
: nofree_ptr_hash
<dw_trace_info
>
160 static inline hashval_t
hash (const dw_trace_info
*);
161 static inline bool equal (const dw_trace_info
*, const dw_trace_info
*);
165 trace_info_hasher::hash (const dw_trace_info
*ti
)
167 return INSN_UID (ti
->head
);
171 trace_info_hasher::equal (const dw_trace_info
*a
, const dw_trace_info
*b
)
173 return a
->head
== b
->head
;
177 /* The variables making up the pseudo-cfg, as described above. */
178 static vec
<dw_trace_info
> trace_info
;
179 static vec
<dw_trace_info
*> trace_work_list
;
180 static hash_table
<trace_info_hasher
> *trace_index
;
182 /* A vector of call frame insns for the CIE. */
185 /* The state of the first row of the FDE table, which includes the
186 state provided by the CIE. */
187 static GTY(()) dw_cfi_row
*cie_cfi_row
;
189 static GTY(()) reg_saved_in_data
*cie_return_save
;
191 static GTY(()) unsigned long dwarf2out_cfi_label_num
;
193 /* The insn after which a new CFI note should be emitted. */
194 static rtx_insn
*add_cfi_insn
;
196 /* When non-null, add_cfi will add the CFI to this vector. */
197 static cfi_vec
*add_cfi_vec
;
199 /* The current instruction trace. */
200 static dw_trace_info
*cur_trace
;
202 /* The current, i.e. most recently generated, row of the CFI table. */
203 static dw_cfi_row
*cur_row
;
205 /* A copy of the current CFA, for use during the processing of a
207 static dw_cfa_location
*cur_cfa
;
209 /* We delay emitting a register save until either (a) we reach the end
210 of the prologue or (b) the register is clobbered. This clusters
211 register saves so that there are fewer pc advances. */
213 struct queued_reg_save
{
216 poly_int64_pod cfa_offset
;
220 static vec
<queued_reg_save
> queued_reg_saves
;
222 /* True if any CFI directives were emitted at the current insn. */
223 static bool any_cfis_emitted
;
225 /* Short-hand for commonly used register numbers. */
226 static unsigned dw_stack_pointer_regnum
;
227 static unsigned dw_frame_pointer_regnum
;
229 /* Hook used by __throw. */
232 expand_builtin_dwarf_sp_column (void)
234 unsigned int dwarf_regnum
= DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM
);
235 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum
, 1));
238 /* MEM is a memory reference for the register size table, each element of
239 which has mode MODE. Initialize column C as a return address column. */
242 init_return_column_size (scalar_int_mode mode
, rtx mem
, unsigned int c
)
244 HOST_WIDE_INT offset
= c
* GET_MODE_SIZE (mode
);
245 HOST_WIDE_INT size
= GET_MODE_SIZE (Pmode
);
246 emit_move_insn (adjust_address (mem
, mode
, offset
),
247 gen_int_mode (size
, mode
));
250 /* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
251 init_one_dwarf_reg_size to communicate on what has been done by the
254 struct init_one_dwarf_reg_state
256 /* Whether the dwarf return column was initialized. */
257 bool wrote_return_column
;
259 /* For each hard register REGNO, whether init_one_dwarf_reg_size
260 was given REGNO to process already. */
261 bool processed_regno
[FIRST_PSEUDO_REGISTER
];
265 /* Helper for expand_builtin_init_dwarf_reg_sizes. Generate code to
266 initialize the dwarf register size table entry corresponding to register
267 REGNO in REGMODE. TABLE is the table base address, SLOTMODE is the mode to
268 use for the size entry to initialize, and INIT_STATE is the communication
269 datastructure conveying what we're doing to our caller. */
272 void init_one_dwarf_reg_size (int regno
, machine_mode regmode
,
273 rtx table
, machine_mode slotmode
,
274 init_one_dwarf_reg_state
*init_state
)
276 const unsigned int dnum
= DWARF_FRAME_REGNUM (regno
);
277 const unsigned int rnum
= DWARF2_FRAME_REG_OUT (dnum
, 1);
278 const unsigned int dcol
= DWARF_REG_TO_UNWIND_COLUMN (rnum
);
280 poly_int64 slotoffset
= dcol
* GET_MODE_SIZE (slotmode
);
281 poly_int64 regsize
= GET_MODE_SIZE (regmode
);
283 init_state
->processed_regno
[regno
] = true;
285 if (rnum
>= DWARF_FRAME_REGISTERS
)
288 if (dnum
== DWARF_FRAME_RETURN_COLUMN
)
290 if (regmode
== VOIDmode
)
292 init_state
->wrote_return_column
= true;
295 /* ??? When is this true? Should it be a test based on DCOL instead? */
296 if (maybe_lt (slotoffset
, 0))
299 emit_move_insn (adjust_address (table
, slotmode
, slotoffset
),
300 gen_int_mode (regsize
, slotmode
));
303 /* Generate code to initialize the dwarf register size table located
304 at the provided ADDRESS. */
307 expand_builtin_init_dwarf_reg_sizes (tree address
)
310 scalar_int_mode mode
= SCALAR_INT_TYPE_MODE (char_type_node
);
311 rtx addr
= expand_normal (address
);
312 rtx mem
= gen_rtx_MEM (BLKmode
, addr
);
314 init_one_dwarf_reg_state init_state
;
316 memset ((char *)&init_state
, 0, sizeof (init_state
));
318 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
320 machine_mode save_mode
;
323 /* No point in processing a register multiple times. This could happen
324 with register spans, e.g. when a reg is first processed as a piece of
325 a span, then as a register on its own later on. */
327 if (init_state
.processed_regno
[i
])
330 save_mode
= targetm
.dwarf_frame_reg_mode (i
);
331 span
= targetm
.dwarf_register_span (gen_rtx_REG (save_mode
, i
));
334 init_one_dwarf_reg_size (i
, save_mode
, mem
, mode
, &init_state
);
337 for (int si
= 0; si
< XVECLEN (span
, 0); si
++)
339 rtx reg
= XVECEXP (span
, 0, si
);
341 init_one_dwarf_reg_size
342 (REGNO (reg
), GET_MODE (reg
), mem
, mode
, &init_state
);
347 if (!init_state
.wrote_return_column
)
348 init_return_column_size (mode
, mem
, DWARF_FRAME_RETURN_COLUMN
);
350 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
351 init_return_column_size (mode
, mem
, DWARF_ALT_FRAME_RETURN_COLUMN
);
354 targetm
.init_dwarf_reg_sizes_extra (address
);
358 static dw_trace_info
*
359 get_trace_info (rtx_insn
*insn
)
363 return trace_index
->find_with_hash (&dummy
, INSN_UID (insn
));
367 save_point_p (rtx_insn
*insn
)
369 /* Labels, except those that are really jump tables. */
371 return inside_basic_block_p (insn
);
373 /* We split traces at the prologue/epilogue notes because those
374 are points at which the unwind info is usually stable. This
375 makes it easier to find spots with identical unwind info so
376 that we can use remember/restore_state opcodes. */
378 switch (NOTE_KIND (insn
))
380 case NOTE_INSN_PROLOGUE_END
:
381 case NOTE_INSN_EPILOGUE_BEG
:
388 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
390 static inline HOST_WIDE_INT
391 div_data_align (HOST_WIDE_INT off
)
393 HOST_WIDE_INT r
= off
/ DWARF_CIE_DATA_ALIGNMENT
;
394 gcc_assert (r
* DWARF_CIE_DATA_ALIGNMENT
== off
);
398 /* Return true if we need a signed version of a given opcode
399 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
402 need_data_align_sf_opcode (HOST_WIDE_INT off
)
404 return DWARF_CIE_DATA_ALIGNMENT
< 0 ? off
> 0 : off
< 0;
407 /* Return a pointer to a newly allocated Call Frame Instruction. */
409 static inline dw_cfi_ref
412 dw_cfi_ref cfi
= ggc_alloc
<dw_cfi_node
> ();
414 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= 0;
415 cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
= 0;
420 /* Return a newly allocated CFI row, with no defined data. */
425 dw_cfi_row
*row
= ggc_cleared_alloc
<dw_cfi_row
> ();
427 row
->cfa
.reg
= INVALID_REGNUM
;
432 /* Return a copy of an existing CFI row. */
435 copy_cfi_row (dw_cfi_row
*src
)
437 dw_cfi_row
*dst
= ggc_alloc
<dw_cfi_row
> ();
440 dst
->reg_save
= vec_safe_copy (src
->reg_save
);
445 /* Return a copy of an existing CFA location. */
447 static dw_cfa_location
*
448 copy_cfa (dw_cfa_location
*src
)
450 dw_cfa_location
*dst
= ggc_alloc
<dw_cfa_location
> ();
455 /* Generate a new label for the CFI info to refer to. */
458 dwarf2out_cfi_label (void)
460 int num
= dwarf2out_cfi_label_num
++;
463 ASM_GENERATE_INTERNAL_LABEL (label
, "LCFI", num
);
465 return xstrdup (label
);
468 /* Add CFI either to the current insn stream or to a vector, or both. */
471 add_cfi (dw_cfi_ref cfi
)
473 any_cfis_emitted
= true;
475 if (add_cfi_insn
!= NULL
)
477 add_cfi_insn
= emit_note_after (NOTE_INSN_CFI
, add_cfi_insn
);
478 NOTE_CFI (add_cfi_insn
) = cfi
;
481 if (add_cfi_vec
!= NULL
)
482 vec_safe_push (*add_cfi_vec
, cfi
);
486 add_cfi_args_size (poly_int64 size
)
488 /* We don't yet have a representation for polynomial sizes. */
489 HOST_WIDE_INT const_size
= size
.to_constant ();
491 dw_cfi_ref cfi
= new_cfi ();
493 /* While we can occasionally have args_size < 0 internally, this state
494 should not persist at a point we actually need an opcode. */
495 gcc_assert (const_size
>= 0);
497 cfi
->dw_cfi_opc
= DW_CFA_GNU_args_size
;
498 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
= const_size
;
504 add_cfi_restore (unsigned reg
)
506 dw_cfi_ref cfi
= new_cfi ();
508 cfi
->dw_cfi_opc
= (reg
& ~0x3f ? DW_CFA_restore_extended
: DW_CFA_restore
);
509 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
514 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
515 that the register column is no longer saved. */
518 update_row_reg_save (dw_cfi_row
*row
, unsigned column
, dw_cfi_ref cfi
)
520 if (vec_safe_length (row
->reg_save
) <= column
)
521 vec_safe_grow_cleared (row
->reg_save
, column
+ 1);
522 (*row
->reg_save
)[column
] = cfi
;
525 /* This function fills in aa dw_cfa_location structure from a dwarf location
526 descriptor sequence. */
529 get_cfa_from_loc_descr (dw_cfa_location
*cfa
, struct dw_loc_descr_node
*loc
)
531 struct dw_loc_descr_node
*ptr
;
533 cfa
->base_offset
= 0;
537 for (ptr
= loc
; ptr
!= NULL
; ptr
= ptr
->dw_loc_next
)
539 enum dwarf_location_atom op
= ptr
->dw_loc_opc
;
575 cfa
->reg
= op
- DW_OP_reg0
;
578 cfa
->reg
= ptr
->dw_loc_oprnd1
.v
.val_int
;
612 cfa
->reg
= op
- DW_OP_breg0
;
613 cfa
->base_offset
= ptr
->dw_loc_oprnd1
.v
.val_int
;
616 cfa
->reg
= ptr
->dw_loc_oprnd1
.v
.val_int
;
617 cfa
->base_offset
= ptr
->dw_loc_oprnd2
.v
.val_int
;
622 case DW_OP_plus_uconst
:
623 cfa
->offset
= ptr
->dw_loc_oprnd1
.v
.val_unsigned
;
631 /* Find the previous value for the CFA, iteratively. CFI is the opcode
632 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
633 one level of remember/restore state processing. */
636 lookup_cfa_1 (dw_cfi_ref cfi
, dw_cfa_location
*loc
, dw_cfa_location
*remember
)
638 switch (cfi
->dw_cfi_opc
)
640 case DW_CFA_def_cfa_offset
:
641 case DW_CFA_def_cfa_offset_sf
:
642 loc
->offset
= cfi
->dw_cfi_oprnd1
.dw_cfi_offset
;
644 case DW_CFA_def_cfa_register
:
645 loc
->reg
= cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
;
648 case DW_CFA_def_cfa_sf
:
649 loc
->reg
= cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
;
650 loc
->offset
= cfi
->dw_cfi_oprnd2
.dw_cfi_offset
;
652 case DW_CFA_def_cfa_expression
:
653 if (cfi
->dw_cfi_oprnd2
.dw_cfi_cfa_loc
)
654 *loc
= *cfi
->dw_cfi_oprnd2
.dw_cfi_cfa_loc
;
656 get_cfa_from_loc_descr (loc
, cfi
->dw_cfi_oprnd1
.dw_cfi_loc
);
659 case DW_CFA_remember_state
:
660 gcc_assert (!remember
->in_use
);
662 remember
->in_use
= 1;
664 case DW_CFA_restore_state
:
665 gcc_assert (remember
->in_use
);
667 remember
->in_use
= 0;
675 /* Determine if two dw_cfa_location structures define the same data. */
678 cfa_equal_p (const dw_cfa_location
*loc1
, const dw_cfa_location
*loc2
)
680 return (loc1
->reg
== loc2
->reg
681 && known_eq (loc1
->offset
, loc2
->offset
)
682 && loc1
->indirect
== loc2
->indirect
683 && (loc1
->indirect
== 0
684 || known_eq (loc1
->base_offset
, loc2
->base_offset
)));
687 /* Determine if two CFI operands are identical. */
690 cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t
, dw_cfi_oprnd
*a
, dw_cfi_oprnd
*b
)
694 case dw_cfi_oprnd_unused
:
696 case dw_cfi_oprnd_reg_num
:
697 return a
->dw_cfi_reg_num
== b
->dw_cfi_reg_num
;
698 case dw_cfi_oprnd_offset
:
699 return a
->dw_cfi_offset
== b
->dw_cfi_offset
;
700 case dw_cfi_oprnd_addr
:
701 return (a
->dw_cfi_addr
== b
->dw_cfi_addr
702 || strcmp (a
->dw_cfi_addr
, b
->dw_cfi_addr
) == 0);
703 case dw_cfi_oprnd_loc
:
704 return loc_descr_equal_p (a
->dw_cfi_loc
, b
->dw_cfi_loc
);
705 case dw_cfi_oprnd_cfa_loc
:
706 return cfa_equal_p (a
->dw_cfi_cfa_loc
, b
->dw_cfi_cfa_loc
);
711 /* Determine if two CFI entries are identical. */
714 cfi_equal_p (dw_cfi_ref a
, dw_cfi_ref b
)
716 enum dwarf_call_frame_info opc
;
718 /* Make things easier for our callers, including missing operands. */
721 if (a
== NULL
|| b
== NULL
)
724 /* Obviously, the opcodes must match. */
726 if (opc
!= b
->dw_cfi_opc
)
729 /* Compare the two operands, re-using the type of the operands as
730 already exposed elsewhere. */
731 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc
),
732 &a
->dw_cfi_oprnd1
, &b
->dw_cfi_oprnd1
)
733 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc
),
734 &a
->dw_cfi_oprnd2
, &b
->dw_cfi_oprnd2
));
737 /* Determine if two CFI_ROW structures are identical. */
740 cfi_row_equal_p (dw_cfi_row
*a
, dw_cfi_row
*b
)
742 size_t i
, n_a
, n_b
, n_max
;
746 if (!cfi_equal_p (a
->cfa_cfi
, b
->cfa_cfi
))
749 else if (!cfa_equal_p (&a
->cfa
, &b
->cfa
))
752 n_a
= vec_safe_length (a
->reg_save
);
753 n_b
= vec_safe_length (b
->reg_save
);
754 n_max
= MAX (n_a
, n_b
);
756 for (i
= 0; i
< n_max
; ++i
)
758 dw_cfi_ref r_a
= NULL
, r_b
= NULL
;
761 r_a
= (*a
->reg_save
)[i
];
763 r_b
= (*b
->reg_save
)[i
];
765 if (!cfi_equal_p (r_a
, r_b
))
772 /* The CFA is now calculated from NEW_CFA. Consider OLD_CFA in determining
773 what opcode to emit. Returns the CFI opcode to effect the change, or
774 NULL if NEW_CFA == OLD_CFA. */
777 def_cfa_0 (dw_cfa_location
*old_cfa
, dw_cfa_location
*new_cfa
)
781 /* If nothing changed, no need to issue any call frame instructions. */
782 if (cfa_equal_p (old_cfa
, new_cfa
))
787 HOST_WIDE_INT const_offset
;
788 if (new_cfa
->reg
== old_cfa
->reg
789 && !new_cfa
->indirect
790 && !old_cfa
->indirect
791 && new_cfa
->offset
.is_constant (&const_offset
))
793 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
794 the CFA register did not change but the offset did. The data
795 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
796 in the assembler via the .cfi_def_cfa_offset directive. */
797 if (const_offset
< 0)
798 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_offset_sf
;
800 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_offset
;
801 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
= const_offset
;
803 else if (new_cfa
->offset
.is_constant ()
804 && known_eq (new_cfa
->offset
, old_cfa
->offset
)
805 && old_cfa
->reg
!= INVALID_REGNUM
806 && !new_cfa
->indirect
807 && !old_cfa
->indirect
)
809 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
810 indicating the CFA register has changed to <register> but the
811 offset has not changed. This requires the old CFA to have
812 been set as a register plus offset rather than a general
813 DW_CFA_def_cfa_expression. */
814 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_register
;
815 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= new_cfa
->reg
;
817 else if (new_cfa
->indirect
== 0
818 && new_cfa
->offset
.is_constant (&const_offset
))
820 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
821 indicating the CFA register has changed to <register> with
822 the specified offset. The data factoring for DW_CFA_def_cfa_sf
823 happens in output_cfi, or in the assembler via the .cfi_def_cfa
825 if (const_offset
< 0)
826 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_sf
;
828 cfi
->dw_cfi_opc
= DW_CFA_def_cfa
;
829 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= new_cfa
->reg
;
830 cfi
->dw_cfi_oprnd2
.dw_cfi_offset
= const_offset
;
834 /* Construct a DW_CFA_def_cfa_expression instruction to
835 calculate the CFA using a full location expression since no
836 register-offset pair is available. */
837 struct dw_loc_descr_node
*loc_list
;
839 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_expression
;
840 loc_list
= build_cfa_loc (new_cfa
, 0);
841 cfi
->dw_cfi_oprnd1
.dw_cfi_loc
= loc_list
;
842 if (!new_cfa
->offset
.is_constant ()
843 || !new_cfa
->base_offset
.is_constant ())
844 /* It's hard to reconstruct the CFA location for a polynomial
845 expression, so just cache it instead. */
846 cfi
->dw_cfi_oprnd2
.dw_cfi_cfa_loc
= copy_cfa (new_cfa
);
848 cfi
->dw_cfi_oprnd2
.dw_cfi_cfa_loc
= NULL
;
854 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
857 def_cfa_1 (dw_cfa_location
*new_cfa
)
861 if (cur_trace
->cfa_store
.reg
== new_cfa
->reg
&& new_cfa
->indirect
== 0)
862 cur_trace
->cfa_store
.offset
= new_cfa
->offset
;
864 cfi
= def_cfa_0 (&cur_row
->cfa
, new_cfa
);
867 cur_row
->cfa
= *new_cfa
;
868 cur_row
->cfa_cfi
= (cfi
->dw_cfi_opc
== DW_CFA_def_cfa_expression
875 /* Add the CFI for saving a register. REG is the CFA column number.
876 If SREG is -1, the register is saved at OFFSET from the CFA;
877 otherwise it is saved in SREG. */
880 reg_save (unsigned int reg
, unsigned int sreg
, poly_int64 offset
)
882 dw_fde_ref fde
= cfun
? cfun
->fde
: NULL
;
883 dw_cfi_ref cfi
= new_cfi ();
885 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
887 if (sreg
== INVALID_REGNUM
)
889 HOST_WIDE_INT const_offset
;
890 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
891 if (fde
&& fde
->stack_realign
)
893 cfi
->dw_cfi_opc
= DW_CFA_expression
;
894 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
895 cfi
->dw_cfi_oprnd2
.dw_cfi_loc
896 = build_cfa_aligned_loc (&cur_row
->cfa
, offset
,
897 fde
->stack_realignment
);
899 else if (offset
.is_constant (&const_offset
))
901 if (need_data_align_sf_opcode (const_offset
))
902 cfi
->dw_cfi_opc
= DW_CFA_offset_extended_sf
;
903 else if (reg
& ~0x3f)
904 cfi
->dw_cfi_opc
= DW_CFA_offset_extended
;
906 cfi
->dw_cfi_opc
= DW_CFA_offset
;
907 cfi
->dw_cfi_oprnd2
.dw_cfi_offset
= const_offset
;
911 cfi
->dw_cfi_opc
= DW_CFA_expression
;
912 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
913 cfi
->dw_cfi_oprnd2
.dw_cfi_loc
914 = build_cfa_loc (&cur_row
->cfa
, offset
);
917 else if (sreg
== reg
)
919 /* While we could emit something like DW_CFA_same_value or
920 DW_CFA_restore, we never expect to see something like that
921 in a prologue. This is more likely to be a bug. A backend
922 can always bypass this by using REG_CFA_RESTORE directly. */
927 cfi
->dw_cfi_opc
= DW_CFA_register
;
928 cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
= sreg
;
932 update_row_reg_save (cur_row
, reg
, cfi
);
935 /* A subroutine of scan_trace. Check INSN for a REG_ARGS_SIZE note
936 and adjust data structures to match. */
939 notice_args_size (rtx_insn
*insn
)
941 poly_int64 args_size
, delta
;
944 note
= find_reg_note (insn
, REG_ARGS_SIZE
, NULL
);
948 if (!cur_trace
->eh_head
)
949 cur_trace
->args_size_defined_for_eh
= true;
951 args_size
= get_args_size (note
);
952 delta
= args_size
- cur_trace
->end_true_args_size
;
953 if (known_eq (delta
, 0))
956 cur_trace
->end_true_args_size
= args_size
;
958 /* If the CFA is computed off the stack pointer, then we must adjust
959 the computation of the CFA as well. */
960 if (cur_cfa
->reg
== dw_stack_pointer_regnum
)
962 gcc_assert (!cur_cfa
->indirect
);
964 /* Convert a change in args_size (always a positive in the
965 direction of stack growth) to a change in stack pointer. */
966 if (!STACK_GROWS_DOWNWARD
)
969 cur_cfa
->offset
+= delta
;
973 /* A subroutine of scan_trace. INSN is can_throw_internal. Update the
974 data within the trace related to EH insns and args_size. */
977 notice_eh_throw (rtx_insn
*insn
)
979 poly_int64 args_size
= cur_trace
->end_true_args_size
;
980 if (cur_trace
->eh_head
== NULL
)
982 cur_trace
->eh_head
= insn
;
983 cur_trace
->beg_delay_args_size
= args_size
;
984 cur_trace
->end_delay_args_size
= args_size
;
986 else if (maybe_ne (cur_trace
->end_delay_args_size
, args_size
))
988 cur_trace
->end_delay_args_size
= args_size
;
990 /* ??? If the CFA is the stack pointer, search backward for the last
991 CFI note and insert there. Given that the stack changed for the
992 args_size change, there *must* be such a note in between here and
994 add_cfi_args_size (args_size
);
998 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
999 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
1000 used in places where rtl is prohibited. */
1002 static inline unsigned
1003 dwf_regno (const_rtx reg
)
1005 gcc_assert (REGNO (reg
) < FIRST_PSEUDO_REGISTER
);
1006 return DWARF_FRAME_REGNUM (REGNO (reg
));
1009 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1012 compare_reg_or_pc (rtx x
, rtx y
)
1014 if (REG_P (x
) && REG_P (y
))
1015 return REGNO (x
) == REGNO (y
);
1019 /* Record SRC as being saved in DEST. DEST may be null to delete an
1020 existing entry. SRC may be a register or PC_RTX. */
1023 record_reg_saved_in_reg (rtx dest
, rtx src
)
1025 reg_saved_in_data
*elt
;
1028 FOR_EACH_VEC_ELT (cur_trace
->regs_saved_in_regs
, i
, elt
)
1029 if (compare_reg_or_pc (elt
->orig_reg
, src
))
1032 cur_trace
->regs_saved_in_regs
.unordered_remove (i
);
1034 elt
->saved_in_reg
= dest
;
1041 reg_saved_in_data e
= {src
, dest
};
1042 cur_trace
->regs_saved_in_regs
.safe_push (e
);
1045 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1046 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1049 queue_reg_save (rtx reg
, rtx sreg
, poly_int64 offset
)
1052 queued_reg_save e
= {reg
, sreg
, offset
};
1055 /* Duplicates waste space, but it's also necessary to remove them
1056 for correctness, since the queue gets output in reverse order. */
1057 FOR_EACH_VEC_ELT (queued_reg_saves
, i
, q
)
1058 if (compare_reg_or_pc (q
->reg
, reg
))
1064 queued_reg_saves
.safe_push (e
);
1067 /* Output all the entries in QUEUED_REG_SAVES. */
1070 dwarf2out_flush_queued_reg_saves (void)
1075 FOR_EACH_VEC_ELT (queued_reg_saves
, i
, q
)
1077 unsigned int reg
, sreg
;
1079 record_reg_saved_in_reg (q
->saved_reg
, q
->reg
);
1081 if (q
->reg
== pc_rtx
)
1082 reg
= DWARF_FRAME_RETURN_COLUMN
;
1084 reg
= dwf_regno (q
->reg
);
1086 sreg
= dwf_regno (q
->saved_reg
);
1088 sreg
= INVALID_REGNUM
;
1089 reg_save (reg
, sreg
, q
->cfa_offset
);
1092 queued_reg_saves
.truncate (0);
1095 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1096 location for? Or, does it clobber a register which we've previously
1097 said that some other register is saved in, and for which we now
1098 have a new location for? */
1101 clobbers_queued_reg_save (const_rtx insn
)
1106 FOR_EACH_VEC_ELT (queued_reg_saves
, iq
, q
)
1109 reg_saved_in_data
*rir
;
1111 if (modified_in_p (q
->reg
, insn
))
1114 FOR_EACH_VEC_ELT (cur_trace
->regs_saved_in_regs
, ir
, rir
)
1115 if (compare_reg_or_pc (q
->reg
, rir
->orig_reg
)
1116 && modified_in_p (rir
->saved_in_reg
, insn
))
1123 /* What register, if any, is currently saved in REG? */
1126 reg_saved_in (rtx reg
)
1128 unsigned int regn
= REGNO (reg
);
1130 reg_saved_in_data
*rir
;
1133 FOR_EACH_VEC_ELT (queued_reg_saves
, i
, q
)
1134 if (q
->saved_reg
&& regn
== REGNO (q
->saved_reg
))
1137 FOR_EACH_VEC_ELT (cur_trace
->regs_saved_in_regs
, i
, rir
)
1138 if (regn
== REGNO (rir
->saved_in_reg
))
1139 return rir
->orig_reg
;
1144 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1147 dwarf2out_frame_debug_def_cfa (rtx pat
)
1149 memset (cur_cfa
, 0, sizeof (*cur_cfa
));
1151 pat
= strip_offset (pat
, &cur_cfa
->offset
);
1154 cur_cfa
->indirect
= 1;
1155 pat
= strip_offset (XEXP (pat
, 0), &cur_cfa
->base_offset
);
1157 /* ??? If this fails, we could be calling into the _loc functions to
1158 define a full expression. So far no port does that. */
1159 gcc_assert (REG_P (pat
));
1160 cur_cfa
->reg
= dwf_regno (pat
);
1163 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1166 dwarf2out_frame_debug_adjust_cfa (rtx pat
)
1170 gcc_assert (GET_CODE (pat
) == SET
);
1171 dest
= XEXP (pat
, 0);
1172 src
= XEXP (pat
, 1);
1174 switch (GET_CODE (src
))
1177 gcc_assert (dwf_regno (XEXP (src
, 0)) == cur_cfa
->reg
);
1178 cur_cfa
->offset
-= rtx_to_poly_int64 (XEXP (src
, 1));
1188 cur_cfa
->reg
= dwf_regno (dest
);
1189 gcc_assert (cur_cfa
->indirect
== 0);
1192 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1195 dwarf2out_frame_debug_cfa_offset (rtx set
)
1198 rtx src
, addr
, span
;
1199 unsigned int sregno
;
1201 src
= XEXP (set
, 1);
1202 addr
= XEXP (set
, 0);
1203 gcc_assert (MEM_P (addr
));
1204 addr
= XEXP (addr
, 0);
1206 /* As documented, only consider extremely simple addresses. */
1207 switch (GET_CODE (addr
))
1210 gcc_assert (dwf_regno (addr
) == cur_cfa
->reg
);
1211 offset
= -cur_cfa
->offset
;
1214 gcc_assert (dwf_regno (XEXP (addr
, 0)) == cur_cfa
->reg
);
1215 offset
= rtx_to_poly_int64 (XEXP (addr
, 1)) - cur_cfa
->offset
;
1224 sregno
= DWARF_FRAME_RETURN_COLUMN
;
1228 span
= targetm
.dwarf_register_span (src
);
1229 sregno
= dwf_regno (src
);
1232 /* ??? We'd like to use queue_reg_save, but we need to come up with
1233 a different flushing heuristic for epilogues. */
1235 reg_save (sregno
, INVALID_REGNUM
, offset
);
1238 /* We have a PARALLEL describing where the contents of SRC live.
1239 Adjust the offset for each piece of the PARALLEL. */
1240 poly_int64 span_offset
= offset
;
1242 gcc_assert (GET_CODE (span
) == PARALLEL
);
1244 const int par_len
= XVECLEN (span
, 0);
1245 for (int par_index
= 0; par_index
< par_len
; par_index
++)
1247 rtx elem
= XVECEXP (span
, 0, par_index
);
1248 sregno
= dwf_regno (src
);
1249 reg_save (sregno
, INVALID_REGNUM
, span_offset
);
1250 span_offset
+= GET_MODE_SIZE (GET_MODE (elem
));
1255 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1258 dwarf2out_frame_debug_cfa_register (rtx set
)
1261 unsigned sregno
, dregno
;
1263 src
= XEXP (set
, 1);
1264 dest
= XEXP (set
, 0);
1266 record_reg_saved_in_reg (dest
, src
);
1268 sregno
= DWARF_FRAME_RETURN_COLUMN
;
1270 sregno
= dwf_regno (src
);
1272 dregno
= dwf_regno (dest
);
1274 /* ??? We'd like to use queue_reg_save, but we need to come up with
1275 a different flushing heuristic for epilogues. */
1276 reg_save (sregno
, dregno
, 0);
1279 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1282 dwarf2out_frame_debug_cfa_expression (rtx set
)
1284 rtx src
, dest
, span
;
1285 dw_cfi_ref cfi
= new_cfi ();
1288 dest
= SET_DEST (set
);
1289 src
= SET_SRC (set
);
1291 gcc_assert (REG_P (src
));
1292 gcc_assert (MEM_P (dest
));
1294 span
= targetm
.dwarf_register_span (src
);
1297 regno
= dwf_regno (src
);
1299 cfi
->dw_cfi_opc
= DW_CFA_expression
;
1300 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= regno
;
1301 cfi
->dw_cfi_oprnd2
.dw_cfi_loc
1302 = mem_loc_descriptor (XEXP (dest
, 0), get_address_mode (dest
),
1303 GET_MODE (dest
), VAR_INIT_STATUS_INITIALIZED
);
1305 /* ??? We'd like to use queue_reg_save, were the interface different,
1306 and, as above, we could manage flushing for epilogues. */
1308 update_row_reg_save (cur_row
, regno
, cfi
);
1311 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_VAL_EXPRESSION
1315 dwarf2out_frame_debug_cfa_val_expression (rtx set
)
1317 rtx dest
= SET_DEST (set
);
1318 gcc_assert (REG_P (dest
));
1320 rtx span
= targetm
.dwarf_register_span (dest
);
1323 rtx src
= SET_SRC (set
);
1324 dw_cfi_ref cfi
= new_cfi ();
1325 cfi
->dw_cfi_opc
= DW_CFA_val_expression
;
1326 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= dwf_regno (dest
);
1327 cfi
->dw_cfi_oprnd2
.dw_cfi_loc
1328 = mem_loc_descriptor (src
, GET_MODE (src
),
1329 GET_MODE (dest
), VAR_INIT_STATUS_INITIALIZED
);
1331 update_row_reg_save (cur_row
, dwf_regno (dest
), cfi
);
1334 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1337 dwarf2out_frame_debug_cfa_restore (rtx reg
)
1339 gcc_assert (REG_P (reg
));
1341 rtx span
= targetm
.dwarf_register_span (reg
);
1344 unsigned int regno
= dwf_regno (reg
);
1345 add_cfi_restore (regno
);
1346 update_row_reg_save (cur_row
, regno
, NULL
);
1350 /* We have a PARALLEL describing where the contents of REG live.
1351 Restore the register for each piece of the PARALLEL. */
1352 gcc_assert (GET_CODE (span
) == PARALLEL
);
1354 const int par_len
= XVECLEN (span
, 0);
1355 for (int par_index
= 0; par_index
< par_len
; par_index
++)
1357 reg
= XVECEXP (span
, 0, par_index
);
1358 gcc_assert (REG_P (reg
));
1359 unsigned int regno
= dwf_regno (reg
);
1360 add_cfi_restore (regno
);
1361 update_row_reg_save (cur_row
, regno
, NULL
);
1366 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1367 ??? Perhaps we should note in the CIE where windows are saved (instead of
1368 assuming 0(cfa)) and what registers are in the window. */
1371 dwarf2out_frame_debug_cfa_window_save (void)
1373 dw_cfi_ref cfi
= new_cfi ();
1375 cfi
->dw_cfi_opc
= DW_CFA_GNU_window_save
;
1379 /* Record call frame debugging information for an expression EXPR,
1380 which either sets SP or FP (adjusting how we calculate the frame
1381 address) or saves a register to the stack or another register.
1382 LABEL indicates the address of EXPR.
1384 This function encodes a state machine mapping rtxes to actions on
1385 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1386 users need not read the source code.
1388 The High-Level Picture
1390 Changes in the register we use to calculate the CFA: Currently we
1391 assume that if you copy the CFA register into another register, we
1392 should take the other one as the new CFA register; this seems to
1393 work pretty well. If it's wrong for some target, it's simple
1394 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1396 Changes in the register we use for saving registers to the stack:
1397 This is usually SP, but not always. Again, we deduce that if you
1398 copy SP into another register (and SP is not the CFA register),
1399 then the new register is the one we will be using for register
1400 saves. This also seems to work.
1402 Register saves: There's not much guesswork about this one; if
1403 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1404 register save, and the register used to calculate the destination
1405 had better be the one we think we're using for this purpose.
1406 It's also assumed that a copy from a call-saved register to another
1407 register is saving that register if RTX_FRAME_RELATED_P is set on
1408 that instruction. If the copy is from a call-saved register to
1409 the *same* register, that means that the register is now the same
1410 value as in the caller.
1412 Except: If the register being saved is the CFA register, and the
1413 offset is nonzero, we are saving the CFA, so we assume we have to
1414 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1415 the intent is to save the value of SP from the previous frame.
1417 In addition, if a register has previously been saved to a different
1420 Invariants / Summaries of Rules
1422 cfa current rule for calculating the CFA. It usually
1423 consists of a register and an offset. This is
1424 actually stored in *cur_cfa, but abbreviated
1425 for the purposes of this documentation.
1426 cfa_store register used by prologue code to save things to the stack
1427 cfa_store.offset is the offset from the value of
1428 cfa_store.reg to the actual CFA
1429 cfa_temp register holding an integral value. cfa_temp.offset
1430 stores the value, which will be used to adjust the
1431 stack pointer. cfa_temp is also used like cfa_store,
1432 to track stores to the stack via fp or a temp reg.
1434 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1435 with cfa.reg as the first operand changes the cfa.reg and its
1436 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1439 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1440 expression yielding a constant. This sets cfa_temp.reg
1441 and cfa_temp.offset.
1443 Rule 5: Create a new register cfa_store used to save items to the
1446 Rules 10-14: Save a register to the stack. Define offset as the
1447 difference of the original location and cfa_store's
1448 location (or cfa_temp's location if cfa_temp is used).
1450 Rules 16-20: If AND operation happens on sp in prologue, we assume
1451 stack is realigned. We will use a group of DW_OP_XXX
1452 expressions to represent the location of the stored
1453 register instead of CFA+offset.
1457 "{a,b}" indicates a choice of a xor b.
1458 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1461 (set <reg1> <reg2>:cfa.reg)
1462 effects: cfa.reg = <reg1>
1463 cfa.offset unchanged
1464 cfa_temp.reg = <reg1>
1465 cfa_temp.offset = cfa.offset
1468 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1469 {<const_int>,<reg>:cfa_temp.reg}))
1470 effects: cfa.reg = sp if fp used
1471 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1472 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1473 if cfa_store.reg==sp
1476 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1477 effects: cfa.reg = fp
1478 cfa_offset += +/- <const_int>
1481 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1482 constraints: <reg1> != fp
1484 effects: cfa.reg = <reg1>
1485 cfa_temp.reg = <reg1>
1486 cfa_temp.offset = cfa.offset
1489 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1490 constraints: <reg1> != fp
1492 effects: cfa_store.reg = <reg1>
1493 cfa_store.offset = cfa.offset - cfa_temp.offset
1496 (set <reg> <const_int>)
1497 effects: cfa_temp.reg = <reg>
1498 cfa_temp.offset = <const_int>
1501 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1502 effects: cfa_temp.reg = <reg1>
1503 cfa_temp.offset |= <const_int>
1506 (set <reg> (high <exp>))
1510 (set <reg> (lo_sum <exp> <const_int>))
1511 effects: cfa_temp.reg = <reg>
1512 cfa_temp.offset = <const_int>
1515 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1516 effects: cfa_store.offset -= <const_int>
1517 cfa.offset = cfa_store.offset if cfa.reg == sp
1519 cfa.base_offset = -cfa_store.offset
1522 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1523 effects: cfa_store.offset += -/+ mode_size(mem)
1524 cfa.offset = cfa_store.offset if cfa.reg == sp
1526 cfa.base_offset = -cfa_store.offset
1529 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1532 effects: cfa.reg = <reg1>
1533 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1536 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1537 effects: cfa.reg = <reg1>
1538 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1541 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1542 effects: cfa.reg = <reg1>
1543 cfa.base_offset = -cfa_temp.offset
1544 cfa_temp.offset -= mode_size(mem)
1547 (set <reg> {unspec, unspec_volatile})
1548 effects: target-dependent
1551 (set sp (and: sp <const_int>))
1552 constraints: cfa_store.reg == sp
1553 effects: cfun->fde.stack_realign = 1
1554 cfa_store.offset = 0
1555 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1558 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1559 effects: cfa_store.offset += -/+ mode_size(mem)
1562 (set (mem ({pre_inc, pre_dec} sp)) fp)
1563 constraints: fde->stack_realign == 1
1564 effects: cfa_store.offset = 0
1565 cfa.reg != HARD_FRAME_POINTER_REGNUM
1568 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1569 constraints: fde->stack_realign == 1
1571 && cfa.indirect == 0
1572 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1573 effects: Use DW_CFA_def_cfa_expression to define cfa
1574 cfa.reg == fde->drap_reg */
1577 dwarf2out_frame_debug_expr (rtx expr
)
1579 rtx src
, dest
, span
;
1583 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1584 the PARALLEL independently. The first element is always processed if
1585 it is a SET. This is for backward compatibility. Other elements
1586 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1587 flag is set in them. */
1588 if (GET_CODE (expr
) == PARALLEL
|| GET_CODE (expr
) == SEQUENCE
)
1591 int limit
= XVECLEN (expr
, 0);
1594 /* PARALLELs have strict read-modify-write semantics, so we
1595 ought to evaluate every rvalue before changing any lvalue.
1596 It's cumbersome to do that in general, but there's an
1597 easy approximation that is enough for all current users:
1598 handle register saves before register assignments. */
1599 if (GET_CODE (expr
) == PARALLEL
)
1600 for (par_index
= 0; par_index
< limit
; par_index
++)
1602 elem
= XVECEXP (expr
, 0, par_index
);
1603 if (GET_CODE (elem
) == SET
1604 && MEM_P (SET_DEST (elem
))
1605 && (RTX_FRAME_RELATED_P (elem
) || par_index
== 0))
1606 dwarf2out_frame_debug_expr (elem
);
1609 for (par_index
= 0; par_index
< limit
; par_index
++)
1611 elem
= XVECEXP (expr
, 0, par_index
);
1612 if (GET_CODE (elem
) == SET
1613 && (!MEM_P (SET_DEST (elem
)) || GET_CODE (expr
) == SEQUENCE
)
1614 && (RTX_FRAME_RELATED_P (elem
) || par_index
== 0))
1615 dwarf2out_frame_debug_expr (elem
);
1620 gcc_assert (GET_CODE (expr
) == SET
);
1622 src
= SET_SRC (expr
);
1623 dest
= SET_DEST (expr
);
1627 rtx rsi
= reg_saved_in (src
);
1634 switch (GET_CODE (dest
))
1637 switch (GET_CODE (src
))
1639 /* Setting FP from SP. */
1641 if (cur_cfa
->reg
== dwf_regno (src
))
1644 /* Update the CFA rule wrt SP or FP. Make sure src is
1645 relative to the current CFA register.
1647 We used to require that dest be either SP or FP, but the
1648 ARM copies SP to a temporary register, and from there to
1649 FP. So we just rely on the backends to only set
1650 RTX_FRAME_RELATED_P on appropriate insns. */
1651 cur_cfa
->reg
= dwf_regno (dest
);
1652 cur_trace
->cfa_temp
.reg
= cur_cfa
->reg
;
1653 cur_trace
->cfa_temp
.offset
= cur_cfa
->offset
;
1657 /* Saving a register in a register. */
1658 gcc_assert (!fixed_regs
[REGNO (dest
)]
1659 /* For the SPARC and its register window. */
1660 || (dwf_regno (src
) == DWARF_FRAME_RETURN_COLUMN
));
1662 /* After stack is aligned, we can only save SP in FP
1663 if drap register is used. In this case, we have
1664 to restore stack pointer with the CFA value and we
1665 don't generate this DWARF information. */
1667 && fde
->stack_realign
1668 && REGNO (src
) == STACK_POINTER_REGNUM
)
1669 gcc_assert (REGNO (dest
) == HARD_FRAME_POINTER_REGNUM
1670 && fde
->drap_reg
!= INVALID_REGNUM
1671 && cur_cfa
->reg
!= dwf_regno (src
));
1673 queue_reg_save (src
, dest
, 0);
1680 if (dest
== stack_pointer_rtx
)
1684 if (REG_P (XEXP (src
, 1)))
1686 gcc_assert (dwf_regno (XEXP (src
, 1))
1687 == cur_trace
->cfa_temp
.reg
);
1688 offset
= cur_trace
->cfa_temp
.offset
;
1690 else if (!poly_int_rtx_p (XEXP (src
, 1), &offset
))
1693 if (XEXP (src
, 0) == hard_frame_pointer_rtx
)
1695 /* Restoring SP from FP in the epilogue. */
1696 gcc_assert (cur_cfa
->reg
== dw_frame_pointer_regnum
);
1697 cur_cfa
->reg
= dw_stack_pointer_regnum
;
1699 else if (GET_CODE (src
) == LO_SUM
)
1700 /* Assume we've set the source reg of the LO_SUM from sp. */
1703 gcc_assert (XEXP (src
, 0) == stack_pointer_rtx
);
1705 if (GET_CODE (src
) != MINUS
)
1707 if (cur_cfa
->reg
== dw_stack_pointer_regnum
)
1708 cur_cfa
->offset
+= offset
;
1709 if (cur_trace
->cfa_store
.reg
== dw_stack_pointer_regnum
)
1710 cur_trace
->cfa_store
.offset
+= offset
;
1712 else if (dest
== hard_frame_pointer_rtx
)
1715 /* Either setting the FP from an offset of the SP,
1716 or adjusting the FP */
1717 gcc_assert (frame_pointer_needed
);
1719 gcc_assert (REG_P (XEXP (src
, 0))
1720 && dwf_regno (XEXP (src
, 0)) == cur_cfa
->reg
);
1721 offset
= rtx_to_poly_int64 (XEXP (src
, 1));
1722 if (GET_CODE (src
) != MINUS
)
1724 cur_cfa
->offset
+= offset
;
1725 cur_cfa
->reg
= dw_frame_pointer_regnum
;
1729 gcc_assert (GET_CODE (src
) != MINUS
);
1732 if (REG_P (XEXP (src
, 0))
1733 && dwf_regno (XEXP (src
, 0)) == cur_cfa
->reg
1734 && poly_int_rtx_p (XEXP (src
, 1), &offset
))
1736 /* Setting a temporary CFA register that will be copied
1737 into the FP later on. */
1739 cur_cfa
->offset
+= offset
;
1740 cur_cfa
->reg
= dwf_regno (dest
);
1741 /* Or used to save regs to the stack. */
1742 cur_trace
->cfa_temp
.reg
= cur_cfa
->reg
;
1743 cur_trace
->cfa_temp
.offset
= cur_cfa
->offset
;
1747 else if (REG_P (XEXP (src
, 0))
1748 && dwf_regno (XEXP (src
, 0)) == cur_trace
->cfa_temp
.reg
1749 && XEXP (src
, 1) == stack_pointer_rtx
)
1751 /* Setting a scratch register that we will use instead
1752 of SP for saving registers to the stack. */
1753 gcc_assert (cur_cfa
->reg
== dw_stack_pointer_regnum
);
1754 cur_trace
->cfa_store
.reg
= dwf_regno (dest
);
1755 cur_trace
->cfa_store
.offset
1756 = cur_cfa
->offset
- cur_trace
->cfa_temp
.offset
;
1760 else if (GET_CODE (src
) == LO_SUM
1761 && poly_int_rtx_p (XEXP (src
, 1),
1762 &cur_trace
->cfa_temp
.offset
))
1763 cur_trace
->cfa_temp
.reg
= dwf_regno (dest
);
1772 cur_trace
->cfa_temp
.reg
= dwf_regno (dest
);
1773 cur_trace
->cfa_temp
.offset
= rtx_to_poly_int64 (src
);
1778 gcc_assert (REG_P (XEXP (src
, 0))
1779 && dwf_regno (XEXP (src
, 0)) == cur_trace
->cfa_temp
.reg
1780 && CONST_INT_P (XEXP (src
, 1)));
1782 cur_trace
->cfa_temp
.reg
= dwf_regno (dest
);
1783 if (!can_ior_p (cur_trace
->cfa_temp
.offset
, INTVAL (XEXP (src
, 1)),
1784 &cur_trace
->cfa_temp
.offset
))
1785 /* The target shouldn't generate this kind of CFI note if we
1786 can't represent it. */
1790 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1791 which will fill in all of the bits. */
1798 case UNSPEC_VOLATILE
:
1799 /* All unspecs should be represented by REG_CFA_* notes. */
1805 /* If this AND operation happens on stack pointer in prologue,
1806 we assume the stack is realigned and we extract the
1808 if (fde
&& XEXP (src
, 0) == stack_pointer_rtx
)
1810 /* We interpret reg_save differently with stack_realign set.
1811 Thus we must flush whatever we have queued first. */
1812 dwarf2out_flush_queued_reg_saves ();
1814 gcc_assert (cur_trace
->cfa_store
.reg
1815 == dwf_regno (XEXP (src
, 0)));
1816 fde
->stack_realign
= 1;
1817 fde
->stack_realignment
= INTVAL (XEXP (src
, 1));
1818 cur_trace
->cfa_store
.offset
= 0;
1820 if (cur_cfa
->reg
!= dw_stack_pointer_regnum
1821 && cur_cfa
->reg
!= dw_frame_pointer_regnum
)
1822 fde
->drap_reg
= cur_cfa
->reg
;
1833 /* Saving a register to the stack. Make sure dest is relative to the
1835 switch (GET_CODE (XEXP (dest
, 0)))
1841 /* We can't handle variable size modifications. */
1842 offset
= -rtx_to_poly_int64 (XEXP (XEXP (XEXP (dest
, 0), 1), 1));
1844 gcc_assert (REGNO (XEXP (XEXP (dest
, 0), 0)) == STACK_POINTER_REGNUM
1845 && cur_trace
->cfa_store
.reg
== dw_stack_pointer_regnum
);
1847 cur_trace
->cfa_store
.offset
+= offset
;
1848 if (cur_cfa
->reg
== dw_stack_pointer_regnum
)
1849 cur_cfa
->offset
= cur_trace
->cfa_store
.offset
;
1851 if (GET_CODE (XEXP (dest
, 0)) == POST_MODIFY
)
1852 offset
-= cur_trace
->cfa_store
.offset
;
1854 offset
= -cur_trace
->cfa_store
.offset
;
1861 offset
= GET_MODE_SIZE (GET_MODE (dest
));
1862 if (GET_CODE (XEXP (dest
, 0)) == PRE_INC
)
1865 gcc_assert ((REGNO (XEXP (XEXP (dest
, 0), 0))
1866 == STACK_POINTER_REGNUM
)
1867 && cur_trace
->cfa_store
.reg
== dw_stack_pointer_regnum
);
1869 cur_trace
->cfa_store
.offset
+= offset
;
1871 /* Rule 18: If stack is aligned, we will use FP as a
1872 reference to represent the address of the stored
1875 && fde
->stack_realign
1877 && REGNO (src
) == HARD_FRAME_POINTER_REGNUM
)
1879 gcc_assert (cur_cfa
->reg
!= dw_frame_pointer_regnum
);
1880 cur_trace
->cfa_store
.offset
= 0;
1883 if (cur_cfa
->reg
== dw_stack_pointer_regnum
)
1884 cur_cfa
->offset
= cur_trace
->cfa_store
.offset
;
1886 if (GET_CODE (XEXP (dest
, 0)) == POST_DEC
)
1887 offset
+= -cur_trace
->cfa_store
.offset
;
1889 offset
= -cur_trace
->cfa_store
.offset
;
1893 /* With an offset. */
1900 gcc_assert (REG_P (XEXP (XEXP (dest
, 0), 0)));
1901 offset
= rtx_to_poly_int64 (XEXP (XEXP (dest
, 0), 1));
1902 if (GET_CODE (XEXP (dest
, 0)) == MINUS
)
1905 regno
= dwf_regno (XEXP (XEXP (dest
, 0), 0));
1907 if (cur_cfa
->reg
== regno
)
1908 offset
-= cur_cfa
->offset
;
1909 else if (cur_trace
->cfa_store
.reg
== regno
)
1910 offset
-= cur_trace
->cfa_store
.offset
;
1913 gcc_assert (cur_trace
->cfa_temp
.reg
== regno
);
1914 offset
-= cur_trace
->cfa_temp
.offset
;
1920 /* Without an offset. */
1923 unsigned int regno
= dwf_regno (XEXP (dest
, 0));
1925 if (cur_cfa
->reg
== regno
)
1926 offset
= -cur_cfa
->offset
;
1927 else if (cur_trace
->cfa_store
.reg
== regno
)
1928 offset
= -cur_trace
->cfa_store
.offset
;
1931 gcc_assert (cur_trace
->cfa_temp
.reg
== regno
);
1932 offset
= -cur_trace
->cfa_temp
.offset
;
1939 gcc_assert (cur_trace
->cfa_temp
.reg
1940 == dwf_regno (XEXP (XEXP (dest
, 0), 0)));
1941 offset
= -cur_trace
->cfa_temp
.offset
;
1942 cur_trace
->cfa_temp
.offset
-= GET_MODE_SIZE (GET_MODE (dest
));
1950 /* If the source operand of this MEM operation is a memory,
1951 we only care how much stack grew. */
1956 && REGNO (src
) != STACK_POINTER_REGNUM
1957 && REGNO (src
) != HARD_FRAME_POINTER_REGNUM
1958 && dwf_regno (src
) == cur_cfa
->reg
)
1960 /* We're storing the current CFA reg into the stack. */
1962 if (known_eq (cur_cfa
->offset
, 0))
1965 /* If stack is aligned, putting CFA reg into stack means
1966 we can no longer use reg + offset to represent CFA.
1967 Here we use DW_CFA_def_cfa_expression instead. The
1968 result of this expression equals to the original CFA
1971 && fde
->stack_realign
1972 && cur_cfa
->indirect
== 0
1973 && cur_cfa
->reg
!= dw_frame_pointer_regnum
)
1975 gcc_assert (fde
->drap_reg
== cur_cfa
->reg
);
1977 cur_cfa
->indirect
= 1;
1978 cur_cfa
->reg
= dw_frame_pointer_regnum
;
1979 cur_cfa
->base_offset
= offset
;
1980 cur_cfa
->offset
= 0;
1982 fde
->drap_reg_saved
= 1;
1986 /* If the source register is exactly the CFA, assume
1987 we're saving SP like any other register; this happens
1989 queue_reg_save (stack_pointer_rtx
, NULL_RTX
, offset
);
1994 /* Otherwise, we'll need to look in the stack to
1995 calculate the CFA. */
1996 rtx x
= XEXP (dest
, 0);
2000 gcc_assert (REG_P (x
));
2002 cur_cfa
->reg
= dwf_regno (x
);
2003 cur_cfa
->base_offset
= offset
;
2004 cur_cfa
->indirect
= 1;
2010 span
= targetm
.dwarf_register_span (src
);
2015 queue_reg_save (src
, NULL_RTX
, offset
);
2018 /* We have a PARALLEL describing where the contents of SRC live.
2019 Queue register saves for each piece of the PARALLEL. */
2020 poly_int64 span_offset
= offset
;
2022 gcc_assert (GET_CODE (span
) == PARALLEL
);
2024 const int par_len
= XVECLEN (span
, 0);
2025 for (int par_index
= 0; par_index
< par_len
; par_index
++)
2027 rtx elem
= XVECEXP (span
, 0, par_index
);
2028 queue_reg_save (elem
, NULL_RTX
, span_offset
);
2029 span_offset
+= GET_MODE_SIZE (GET_MODE (elem
));
2039 /* Record call frame debugging information for INSN, which either sets
2040 SP or FP (adjusting how we calculate the frame address) or saves a
2041 register to the stack. */
2044 dwarf2out_frame_debug (rtx_insn
*insn
)
2047 bool handled_one
= false;
2049 for (note
= REG_NOTES (insn
); note
; note
= XEXP (note
, 1))
2050 switch (REG_NOTE_KIND (note
))
2052 case REG_FRAME_RELATED_EXPR
:
2053 pat
= XEXP (note
, 0);
2056 case REG_CFA_DEF_CFA
:
2057 dwarf2out_frame_debug_def_cfa (XEXP (note
, 0));
2061 case REG_CFA_ADJUST_CFA
:
2066 if (GET_CODE (n
) == PARALLEL
)
2067 n
= XVECEXP (n
, 0, 0);
2069 dwarf2out_frame_debug_adjust_cfa (n
);
2073 case REG_CFA_OFFSET
:
2076 n
= single_set (insn
);
2077 dwarf2out_frame_debug_cfa_offset (n
);
2081 case REG_CFA_REGISTER
:
2086 if (GET_CODE (n
) == PARALLEL
)
2087 n
= XVECEXP (n
, 0, 0);
2089 dwarf2out_frame_debug_cfa_register (n
);
2093 case REG_CFA_EXPRESSION
:
2094 case REG_CFA_VAL_EXPRESSION
:
2097 n
= single_set (insn
);
2099 if (REG_NOTE_KIND (note
) == REG_CFA_EXPRESSION
)
2100 dwarf2out_frame_debug_cfa_expression (n
);
2102 dwarf2out_frame_debug_cfa_val_expression (n
);
2107 case REG_CFA_RESTORE
:
2112 if (GET_CODE (n
) == PARALLEL
)
2113 n
= XVECEXP (n
, 0, 0);
2116 dwarf2out_frame_debug_cfa_restore (n
);
2120 case REG_CFA_SET_VDRAP
:
2124 dw_fde_ref fde
= cfun
->fde
;
2127 gcc_assert (fde
->vdrap_reg
== INVALID_REGNUM
);
2129 fde
->vdrap_reg
= dwf_regno (n
);
2135 case REG_CFA_TOGGLE_RA_MANGLE
:
2136 case REG_CFA_WINDOW_SAVE
:
2137 /* We overload both of these operations onto the same DWARF opcode. */
2138 dwarf2out_frame_debug_cfa_window_save ();
2142 case REG_CFA_FLUSH_QUEUE
:
2143 /* The actual flush happens elsewhere. */
2153 pat
= PATTERN (insn
);
2155 dwarf2out_frame_debug_expr (pat
);
2157 /* Check again. A parallel can save and update the same register.
2158 We could probably check just once, here, but this is safer than
2159 removing the check at the start of the function. */
2160 if (clobbers_queued_reg_save (pat
))
2161 dwarf2out_flush_queued_reg_saves ();
2165 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2168 change_cfi_row (dw_cfi_row
*old_row
, dw_cfi_row
*new_row
)
2170 size_t i
, n_old
, n_new
, n_max
;
2173 if (new_row
->cfa_cfi
&& !cfi_equal_p (old_row
->cfa_cfi
, new_row
->cfa_cfi
))
2174 add_cfi (new_row
->cfa_cfi
);
2177 cfi
= def_cfa_0 (&old_row
->cfa
, &new_row
->cfa
);
2182 n_old
= vec_safe_length (old_row
->reg_save
);
2183 n_new
= vec_safe_length (new_row
->reg_save
);
2184 n_max
= MAX (n_old
, n_new
);
2186 for (i
= 0; i
< n_max
; ++i
)
2188 dw_cfi_ref r_old
= NULL
, r_new
= NULL
;
2191 r_old
= (*old_row
->reg_save
)[i
];
2193 r_new
= (*new_row
->reg_save
)[i
];
2197 else if (r_new
== NULL
)
2198 add_cfi_restore (i
);
2199 else if (!cfi_equal_p (r_old
, r_new
))
2204 /* Examine CFI and return true if a cfi label and set_loc is needed
2205 beforehand. Even when generating CFI assembler instructions, we
2206 still have to add the cfi to the list so that lookup_cfa_1 works
2207 later on. When -g2 and above we even need to force emitting of
2208 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2209 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2210 and so don't use convert_cfa_to_fb_loc_list. */
2213 cfi_label_required_p (dw_cfi_ref cfi
)
2215 if (!dwarf2out_do_cfi_asm ())
2218 if (dwarf_version
== 2
2219 && debug_info_level
> DINFO_LEVEL_TERSE
2220 && (write_symbols
== DWARF2_DEBUG
2221 || write_symbols
== VMS_AND_DWARF2_DEBUG
))
2223 switch (cfi
->dw_cfi_opc
)
2225 case DW_CFA_def_cfa_offset
:
2226 case DW_CFA_def_cfa_offset_sf
:
2227 case DW_CFA_def_cfa_register
:
2228 case DW_CFA_def_cfa
:
2229 case DW_CFA_def_cfa_sf
:
2230 case DW_CFA_def_cfa_expression
:
2231 case DW_CFA_restore_state
:
2240 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2241 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2244 add_cfis_to_fde (void)
2246 dw_fde_ref fde
= cfun
->fde
;
2247 rtx_insn
*insn
, *next
;
2249 for (insn
= get_insns (); insn
; insn
= next
)
2251 next
= NEXT_INSN (insn
);
2253 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_SWITCH_TEXT_SECTIONS
)
2254 fde
->dw_fde_switch_cfi_index
= vec_safe_length (fde
->dw_fde_cfi
);
2256 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_CFI
)
2258 bool required
= cfi_label_required_p (NOTE_CFI (insn
));
2260 if (NOTE_P (next
) && NOTE_KIND (next
) == NOTE_INSN_CFI
)
2262 required
|= cfi_label_required_p (NOTE_CFI (next
));
2263 next
= NEXT_INSN (next
);
2265 else if (active_insn_p (next
)
2266 || (NOTE_P (next
) && (NOTE_KIND (next
)
2267 == NOTE_INSN_SWITCH_TEXT_SECTIONS
)))
2270 next
= NEXT_INSN (next
);
2273 int num
= dwarf2out_cfi_label_num
;
2274 const char *label
= dwarf2out_cfi_label ();
2277 /* Set the location counter to the new label. */
2279 xcfi
->dw_cfi_opc
= DW_CFA_advance_loc4
;
2280 xcfi
->dw_cfi_oprnd1
.dw_cfi_addr
= label
;
2281 vec_safe_push (fde
->dw_fde_cfi
, xcfi
);
2283 rtx_note
*tmp
= emit_note_before (NOTE_INSN_CFI_LABEL
, insn
);
2284 NOTE_LABEL_NUMBER (tmp
) = num
;
2289 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_CFI
)
2290 vec_safe_push (fde
->dw_fde_cfi
, NOTE_CFI (insn
));
2291 insn
= NEXT_INSN (insn
);
2293 while (insn
!= next
);
2298 static void dump_cfi_row (FILE *f
, dw_cfi_row
*row
);
2300 /* If LABEL is the start of a trace, then initialize the state of that
2301 trace from CUR_TRACE and CUR_ROW. */
2304 maybe_record_trace_start (rtx_insn
*start
, rtx_insn
*origin
)
2308 ti
= get_trace_info (start
);
2309 gcc_assert (ti
!= NULL
);
2313 fprintf (dump_file
, " saw edge from trace %u to %u (via %s %d)\n",
2314 cur_trace
->id
, ti
->id
,
2315 (origin
? rtx_name
[(int) GET_CODE (origin
)] : "fallthru"),
2316 (origin
? INSN_UID (origin
) : 0));
2319 poly_int64 args_size
= cur_trace
->end_true_args_size
;
2320 if (ti
->beg_row
== NULL
)
2322 /* This is the first time we've encountered this trace. Propagate
2323 state across the edge and push the trace onto the work list. */
2324 ti
->beg_row
= copy_cfi_row (cur_row
);
2325 ti
->beg_true_args_size
= args_size
;
2327 ti
->cfa_store
= cur_trace
->cfa_store
;
2328 ti
->cfa_temp
= cur_trace
->cfa_temp
;
2329 ti
->regs_saved_in_regs
= cur_trace
->regs_saved_in_regs
.copy ();
2331 trace_work_list
.safe_push (ti
);
2334 fprintf (dump_file
, "\tpush trace %u to worklist\n", ti
->id
);
2339 /* We ought to have the same state incoming to a given trace no
2340 matter how we arrive at the trace. Anything else means we've
2341 got some kind of optimization error. */
2343 if (!cfi_row_equal_p (cur_row
, ti
->beg_row
))
2347 fprintf (dump_file
, "Inconsistent CFI state!\n");
2348 fprintf (dump_file
, "SHOULD have:\n");
2349 dump_cfi_row (dump_file
, ti
->beg_row
);
2350 fprintf (dump_file
, "DO have:\n");
2351 dump_cfi_row (dump_file
, cur_row
);
2358 /* The args_size is allowed to conflict if it isn't actually used. */
2359 if (maybe_ne (ti
->beg_true_args_size
, args_size
))
2360 ti
->args_size_undefined
= true;
2364 /* Similarly, but handle the args_size and CFA reset across EH
2365 and non-local goto edges. */
2368 maybe_record_trace_start_abnormal (rtx_insn
*start
, rtx_insn
*origin
)
2370 poly_int64 save_args_size
, delta
;
2371 dw_cfa_location save_cfa
;
2373 save_args_size
= cur_trace
->end_true_args_size
;
2374 if (known_eq (save_args_size
, 0))
2376 maybe_record_trace_start (start
, origin
);
2380 delta
= -save_args_size
;
2381 cur_trace
->end_true_args_size
= 0;
2383 save_cfa
= cur_row
->cfa
;
2384 if (cur_row
->cfa
.reg
== dw_stack_pointer_regnum
)
2386 /* Convert a change in args_size (always a positive in the
2387 direction of stack growth) to a change in stack pointer. */
2388 if (!STACK_GROWS_DOWNWARD
)
2391 cur_row
->cfa
.offset
+= delta
;
2394 maybe_record_trace_start (start
, origin
);
2396 cur_trace
->end_true_args_size
= save_args_size
;
2397 cur_row
->cfa
= save_cfa
;
2400 /* Propagate CUR_TRACE state to the destinations implied by INSN. */
2401 /* ??? Sadly, this is in large part a duplicate of make_edges. */
2404 create_trace_edges (rtx_insn
*insn
)
2411 rtx_jump_table_data
*table
;
2413 if (find_reg_note (insn
, REG_NON_LOCAL_GOTO
, NULL_RTX
))
2416 if (tablejump_p (insn
, NULL
, &table
))
2418 rtvec vec
= table
->get_labels ();
2420 n
= GET_NUM_ELEM (vec
);
2421 for (i
= 0; i
< n
; ++i
)
2423 rtx_insn
*lab
= as_a
<rtx_insn
*> (XEXP (RTVEC_ELT (vec
, i
), 0));
2424 maybe_record_trace_start (lab
, insn
);
2427 else if (computed_jump_p (insn
))
2431 FOR_EACH_VEC_SAFE_ELT (forced_labels
, i
, temp
)
2432 maybe_record_trace_start (temp
, insn
);
2434 else if (returnjump_p (insn
))
2436 else if ((tmp
= extract_asm_operands (PATTERN (insn
))) != NULL
)
2438 n
= ASM_OPERANDS_LABEL_LENGTH (tmp
);
2439 for (i
= 0; i
< n
; ++i
)
2442 as_a
<rtx_insn
*> (XEXP (ASM_OPERANDS_LABEL (tmp
, i
), 0));
2443 maybe_record_trace_start (lab
, insn
);
2448 rtx_insn
*lab
= JUMP_LABEL_AS_INSN (insn
);
2449 gcc_assert (lab
!= NULL
);
2450 maybe_record_trace_start (lab
, insn
);
2453 else if (CALL_P (insn
))
2455 /* Sibling calls don't have edges inside this function. */
2456 if (SIBLING_CALL_P (insn
))
2459 /* Process non-local goto edges. */
2460 if (can_nonlocal_goto (insn
))
2461 for (rtx_insn_list
*lab
= nonlocal_goto_handler_labels
;
2464 maybe_record_trace_start_abnormal (lab
->insn (), insn
);
2466 else if (rtx_sequence
*seq
= dyn_cast
<rtx_sequence
*> (PATTERN (insn
)))
2468 int i
, n
= seq
->len ();
2469 for (i
= 0; i
< n
; ++i
)
2470 create_trace_edges (seq
->insn (i
));
2474 /* Process EH edges. */
2475 if (CALL_P (insn
) || cfun
->can_throw_non_call_exceptions
)
2477 eh_landing_pad lp
= get_eh_landing_pad_from_rtx (insn
);
2479 maybe_record_trace_start_abnormal (lp
->landing_pad
, insn
);
2483 /* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
2486 scan_insn_after (rtx_insn
*insn
)
2488 if (RTX_FRAME_RELATED_P (insn
))
2489 dwarf2out_frame_debug (insn
);
2490 notice_args_size (insn
);
2493 /* Scan the trace beginning at INSN and create the CFI notes for the
2494 instructions therein. */
/* NOTE(review): the text below is a line-mangled extraction (each logical
   source line is split across several physical lines and prefixed with its
   original line number); several structural lines (braces, return types)
   were dropped by the extraction.  Annotated in place; do not reflow.  */
/* scan_trace: walk every insn of TRACE, maintaining the abstract CFI row
   state (cur_row / this_cfa) and positioning NOTE_INSN_CFI annotations via
   add_cfi_insn as the state changes.  ENTRY is true for the function's
   entry trace (used for the non-standard incoming-SP-offset fixup).  */
2497 scan_trace (dw_trace_info
*trace
, bool entry
)
2499 rtx_insn
*prev
, *insn
= trace
->head
;
2500 dw_cfa_location this_cfa
;
2503 fprintf (dump_file
, "Processing trace %u : start at %s %d\n",
2504 trace
->id
, rtx_name
[(int) GET_CODE (insn
)],
/* Start the trace's end state as a copy of its begin state; scanning
   mutates the end state in place.  */
2507 trace
->end_row
= copy_cfi_row (trace
->beg_row
);
2508 trace
->end_true_args_size
= trace
->beg_true_args_size
;
2511 cur_row
= trace
->end_row
;
2513 this_cfa
= cur_row
->cfa
;
2514 cur_cfa
= &this_cfa
;
2516 /* If the current function starts with a non-standard incoming frame
2517 sp offset, emit a note before the first instruction. */
2519 && DEFAULT_INCOMING_FRAME_SP_OFFSET
!= INCOMING_FRAME_SP_OFFSET
)
2521 add_cfi_insn
= insn
;
2522 gcc_assert (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_DELETED
);
2523 this_cfa
.offset
= INCOMING_FRAME_SP_OFFSET
;
2524 def_cfa_1 (&this_cfa
);
/* Main scan loop over the insns of the trace.  */
2527 for (prev
= insn
, insn
= NEXT_INSN (insn
);
2529 prev
= insn
, insn
= NEXT_INSN (insn
))
2533 /* Do everything that happens "before" the insn. */
2534 add_cfi_insn
= prev
;
2536 /* Notice the end of a trace. */
2537 if (BARRIER_P (insn
))
2539 /* Don't bother saving the unneeded queued registers at all. */
2540 queued_reg_saves
.truncate (0);
2543 if (save_point_p (insn
))
2545 /* Propagate across fallthru edges. */
2546 dwarf2out_flush_queued_reg_saves ();
2547 maybe_record_trace_start (insn
, NULL
);
2551 if (DEBUG_INSN_P (insn
) || !inside_basic_block_p (insn
))
2554 /* Handle all changes to the row state. Sequences require special
2555 handling for the positioning of the notes. */
2556 if (rtx_sequence
*pat
= dyn_cast
<rtx_sequence
*> (PATTERN (insn
)))
2559 int i
, n
= pat
->len ();
/* A SEQUENCE is a branch/call with delay slots: insn 0 is the control
   insn, insns 1..n-1 are the delay-slot insns.  */
2561 control
= pat
->insn (0);
2562 if (can_throw_internal (control
))
2563 notice_eh_throw (control
);
2564 dwarf2out_flush_queued_reg_saves ();
2566 if (JUMP_P (control
) && INSN_ANNULLED_BRANCH_P (control
))
2568 /* ??? Hopefully multiple delay slots are not annulled. */
2569 gcc_assert (n
== 2);
2570 gcc_assert (!RTX_FRAME_RELATED_P (control
));
2571 gcc_assert (!find_reg_note (control
, REG_ARGS_SIZE
, NULL
))
;
2573 elt
= pat
->insn (1);
2575 if (INSN_FROM_TARGET_P (elt
))
2577 cfi_vec save_row_reg_save
;
2579 /* If ELT is an instruction from target of an annulled
2580 branch, the effects are for the target only and so
2581 the args_size and CFA along the current path
2582 shouldn't change. */
2583 add_cfi_insn
= NULL
;
2584 poly_int64 restore_args_size
= cur_trace
->end_true_args_size
;
2585 cur_cfa
= &cur_row
->cfa
;
2586 save_row_reg_save
= vec_safe_copy (cur_row
->reg_save
);
2588 scan_insn_after (elt
);
2590 /* ??? Should we instead save the entire row state? */
2591 gcc_assert (!queued_reg_saves
.length ());
2593 create_trace_edges (control
);
/* Restore the row state saved above: ELT's effects were recorded only
   at the branch target, not along the fallthrough path.  */
2595 cur_trace
->end_true_args_size
= restore_args_size
;
2596 cur_row
->cfa
= this_cfa
;
2597 cur_row
->reg_save
= save_row_reg_save
;
2598 cur_cfa
= &this_cfa
;
2602 /* If ELT is a annulled branch-taken instruction (i.e.
2603 executed only when branch is not taken), the args_size
2604 and CFA should not change through the jump. */
2605 create_trace_edges (control
);
2607 /* Update and continue with the trace. */
2608 add_cfi_insn
= insn
;
2609 scan_insn_after (elt
);
2610 def_cfa_1 (&this_cfa
);
2615 /* The insns in the delay slot should all be considered to happen
2616 "before" a call insn. Consider a call with a stack pointer
2617 adjustment in the delay slot. The backtrace from the callee
2618 should include the sp adjustment. Unfortunately, that leaves
2619 us with an unavoidable unwinding error exactly at the call insn
2620 itself. For jump insns we'd prefer to avoid this error by
2621 placing the notes after the sequence. */
2622 if (JUMP_P (control
))
2623 add_cfi_insn
= insn
;
2625 for (i
= 1; i
< n
; ++i
)
2627 elt
= pat
->insn (i
);
2628 scan_insn_after (elt
);
2631 /* Make sure any register saves are visible at the jump target. */
2632 dwarf2out_flush_queued_reg_saves ();
2633 any_cfis_emitted
= false;
2635 /* However, if there is some adjustment on the call itself, e.g.
2636 a call_pop, that action should be considered to happen after
2637 the call returns. */
2638 add_cfi_insn
= insn
;
2639 scan_insn_after (control
);
/* Non-SEQUENCE path: plain insn handling.  */
2643 /* Flush data before calls and jumps, and of course if necessary. */
2644 if (can_throw_internal (insn
))
2646 notice_eh_throw (insn
);
2647 dwarf2out_flush_queued_reg_saves ();
2649 else if (!NONJUMP_INSN_P (insn
)
2650 || clobbers_queued_reg_save (insn
)
2651 || find_reg_note (insn
, REG_CFA_FLUSH_QUEUE
, NULL
))
2652 dwarf2out_flush_queued_reg_saves ();
2653 any_cfis_emitted
= false;
2655 add_cfi_insn
= insn
;
2656 scan_insn_after (insn
);
2660 /* Between frame-related-p and args_size we might have otherwise
2661 emitted two cfa adjustments. Do it now. */
2662 def_cfa_1 (&this_cfa
);
2664 /* Minimize the number of advances by emitting the entire queue
2665 once anything is emitted. */
2666 if (any_cfis_emitted
2667 || find_reg_note (insn
, REG_CFA_FLUSH_QUEUE
, NULL
))
2668 dwarf2out_flush_queued_reg_saves ();
2670 /* Note that a test for control_flow_insn_p does exactly the
2671 same tests as are done to actually create the edges. So
2672 always call the routine and let it not create edges for
2673 non-control-flow insns. */
2674 create_trace_edges (control
);
/* Clear the note-insertion point on exit from the trace.  */
2677 add_cfi_insn
= NULL
;
/* create_cfi_notes: driver for the scanning phase.  Scans the entry trace
   first, then drains the worklist that scan_trace populates (via
   maybe_record_trace_start / create_trace_edges), and finally releases the
   scratch vectors.  (Text is a line-mangled extraction; annotated in
   place.)  */
2683 /* Scan the function and create the initial set of CFI notes. */
2686 create_cfi_notes (void)
2690 gcc_checking_assert (!queued_reg_saves
.exists ());
2691 gcc_checking_assert (!trace_work_list
.exists ());
2693 /* Always begin at the entry trace. */
2694 ti
= &trace_info
[0];
2695 scan_trace (ti
, true);
2697 while (!trace_work_list
.is_empty ())
2699 ti
= trace_work_list
.pop ();
2700 scan_trace (ti
, false);
2703 queued_reg_saves
.release ();
2704 trace_work_list
.release ();
/* before_next_cfi_note: walk forward from START and return the insn
   immediately preceding the first NOTE_INSN_CFI found.  (The loop header
   and return statement were dropped by the extraction; the surviving
   fragments show the scan advancing with NEXT_INSN while tracking the
   previous insn.)  */
2707 /* Return the insn before the first NOTE_INSN_CFI after START. */
2710 before_next_cfi_note (rtx_insn
*start
)
2712 rtx_insn
*prev
= start
;
2715 if (NOTE_P (start
) && NOTE_KIND (start
) == NOTE_INSN_CFI
)
2718 start
= NEXT_INSN (start
);
/* connect_traces: after all traces are scanned, insert fix-up CFI notes at
   trace boundaries so the row state flows correctly from one trace to the
   next.  Uses DW_CFA_remember_state/DW_CFA_restore_state for the common
   epilogue-sharing case, and a plain state change otherwise.  A second
   pass reconciles args_size across traces containing EH insns.  (Text is
   a line-mangled extraction; annotated in place.)  */
2723 /* Insert CFI notes between traces to properly change state between them. */
2726 connect_traces (void)
2729 dw_trace_info
*prev_ti
, *ti
;
2731 /* ??? Ideally, we should have both queued and processed every trace.
2732 However the current representation of constant pools on various targets
2733 is indistinguishable from unreachable code. Assume for the moment that
2734 we can simply skip over such traces. */
2735 /* ??? Consider creating a DATA_INSN rtx code to indicate that
2736 these are not "real" instructions, and should not be considered.
2737 This could be generically useful for tablejump data as well. */
2738 /* Remove all unprocessed traces from the list. */
2740 VEC_ORDERED_REMOVE_IF_FROM_TO (trace_info
, ix
, ix2
, ti
, 1,
2741 trace_info
.length (), ti
->beg_row
== NULL
);
2742 FOR_EACH_VEC_ELT (trace_info
, ix
, ti
)
2743 gcc_assert (ti
->end_row
!= NULL
);
2745 /* Work from the end back to the beginning. This lets us easily insert
2746 remember/restore_state notes in the correct order wrt other notes. */
2747 n
= trace_info
.length ();
2748 prev_ti
= &trace_info
[n
- 1];
2749 for (i
= n
- 1; i
> 0; --i
)
2751 dw_cfi_row
*old_row
;
2754 prev_ti
= &trace_info
[i
- 1];
2756 add_cfi_insn
= ti
->head
;
2758 /* In dwarf2out_switch_text_section, we'll begin a new FDE
2759 for the portion of the function in the alternate text
2760 section. The row state at the very beginning of that
2761 new FDE will be exactly the row state from the CIE. */
2762 if (ti
->switch_sections
)
2763 old_row
= cie_cfi_row
;
2766 old_row
= prev_ti
->end_row
;
2767 /* If there's no change from the previous end state, fine. */
2768 if (cfi_row_equal_p (old_row
, ti
->beg_row
))
2770 /* Otherwise check for the common case of sharing state with
2771 the beginning of an epilogue, but not the end. Insert
2772 remember/restore opcodes in that case. */
2773 else if (cfi_row_equal_p (prev_ti
->beg_row
, ti
->beg_row
))
2777 /* Note that if we blindly insert the remember at the
2778 start of the trace, we can wind up increasing the
2779 size of the unwind info due to extra advance opcodes.
2780 Instead, put the remember immediately before the next
2781 state change. We know there must be one, because the
2782 state at the beginning and head of the trace differ. */
2783 add_cfi_insn
= before_next_cfi_note (prev_ti
->head
);
2785 cfi
->dw_cfi_opc
= DW_CFA_remember_state
;
2788 add_cfi_insn
= ti
->head
;
2790 cfi
->dw_cfi_opc
= DW_CFA_restore_state
;
2793 old_row
= prev_ti
->beg_row
;
2795 /* Otherwise, we'll simply change state from the previous end. */
/* Emit the opcodes needed to morph OLD_ROW into this trace's begin row.  */
2798 change_cfi_row (old_row
, ti
->beg_row
);
2800 if (dump_file
&& add_cfi_insn
!= ti
->head
)
2804 fprintf (dump_file
, "Fixup between trace %u and %u:\n",
2805 prev_ti
->id
, ti
->id
);
2810 note
= NEXT_INSN (note
);
2811 gcc_assert (NOTE_P (note
) && NOTE_KIND (note
) == NOTE_INSN_CFI
);
2812 output_cfi_directive (dump_file
, NOTE_CFI (note
));
2814 while (note
!= add_cfi_insn
);
2818 /* Connect args_size between traces that have can_throw_internal insns. */
2819 if (cfun
->eh
->lp_array
)
2821 poly_int64 prev_args_size
= 0;
2823 for (i
= 0; i
< n
; ++i
)
2825 ti
= &trace_info
[i
];
2827 if (ti
->switch_sections
)
2830 if (ti
->eh_head
== NULL
)
2833 /* We require either the incoming args_size values to match or the
2834 presence of an insn setting it before the first EH insn. */
2835 gcc_assert (!ti
->args_size_undefined
|| ti
->args_size_defined_for_eh
);
2837 /* In the latter case, we force the creation of a CFI note. */
2838 if (ti
->args_size_undefined
2839 || maybe_ne (ti
->beg_delay_args_size
, prev_args_size
))
2841 /* ??? Search back to previous CFI note. */
2842 add_cfi_insn
= PREV_INSN (ti
->eh_head
);
2843 add_cfi_args_size (ti
->beg_delay_args_size
);
2846 prev_args_size
= ti
->end_delay_args_size
;
/* create_pseudo_cfg: partition the insn stream into traces.  The first
   trace starts at the first insn with the CIE row state; subsequent traces
   start at save points (labels, etc.), with special care around barriers
   and section switches.  Finally build trace_index, a hash table mapping
   head-insn UID to its trace.  (Text is a line-mangled extraction;
   annotated in place.)  */
2851 /* Set up the pseudo-cfg of instruction traces, as described at the
2852 block comment at the top of the file. */
2855 create_pseudo_cfg (void)
2857 bool saw_barrier
, switch_sections
;
2862 /* The first trace begins at the start of the function,
2863 and begins with the CIE row state. */
2864 trace_info
.create (16);
2865 memset (&ti
, 0, sizeof (ti
));
2866 ti
.head
= get_insns ();
2867 ti
.beg_row
= cie_cfi_row
;
2868 ti
.cfa_store
= cie_cfi_row
->cfa
;
2869 ti
.cfa_temp
.reg
= INVALID_REGNUM
;
2870 trace_info
.quick_push (ti
);
2872 if (cie_return_save
)
2873 ti
.regs_saved_in_regs
.safe_push (*cie_return_save
);
2875 /* Walk all the insns, collecting start of trace locations. */
2876 saw_barrier
= false;
2877 switch_sections
= false;
2878 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
2880 if (BARRIER_P (insn
))
2882 else if (NOTE_P (insn
)
2883 && NOTE_KIND (insn
) == NOTE_INSN_SWITCH_TEXT_SECTIONS
)
2885 /* We should have just seen a barrier. */
2886 gcc_assert (saw_barrier
);
2887 switch_sections
= true;
2889 /* Watch out for save_point notes between basic blocks.
2890 In particular, a note after a barrier. Do not record these,
2891 delaying trace creation until the label. */
2892 else if (save_point_p (insn
)
2893 && (LABEL_P (insn
) || !saw_barrier
))
2895 memset (&ti
, 0, sizeof (ti
));
2897 ti
.switch_sections
= switch_sections
;
2898 ti
.id
= trace_info
.length ();
2899 trace_info
.safe_push (ti
);
/* Reset the barrier/section flags once a trace has been recorded.  */
2901 saw_barrier
= false;
2902 switch_sections
= false;
2906 /* Create the trace index after we've finished building trace_info,
2907 avoiding stale pointer problems due to reallocation. */
2909 = new hash_table
<trace_info_hasher
> (trace_info
.length ());
2911 FOR_EACH_VEC_ELT (trace_info
, i
, tp
)
2913 dw_trace_info
**slot
;
2916 fprintf (dump_file
, "Creating trace %u : start at %s %d%s\n", tp
->id
,
2917 rtx_name
[(int) GET_CODE (tp
->head
)], INSN_UID (tp
->head
),
2918 tp
->switch_sections
? " (section switch)" : "");
/* The hash is the UID of the trace's head insn; each head must be
   unique, hence the NULL-slot assertion.  */
2920 slot
= trace_index
->find_slot_with_hash (tp
, INSN_UID (tp
->head
), INSERT
);
2921 gcc_assert (*slot
== NULL
);
/* initial_return_save: decode INCOMING_RETURN_ADDR_RTX and record where
   the return address lives on function entry — either in a register
   (REG case) or on the stack at some offset from SP (MEM of REG /
   PLUS / MINUS cases).  The switch-case labels themselves were dropped
   by the extraction; the surviving bodies correspond to those RTX
   codes.  Ends by recording a save of DWARF_FRAME_RETURN_COLUMN
   relative to the CFA.  */
2926 /* Record the initial position of the return address. RTL is
2927 INCOMING_RETURN_ADDR_RTX. */
2930 initial_return_save (rtx rtl
)
2932 unsigned int reg
= INVALID_REGNUM
;
2933 poly_int64 offset
= 0;
2935 switch (GET_CODE (rtl
))
2938 /* RA is in a register. */
2939 reg
= dwf_regno (rtl
);
2943 /* RA is on the stack. */
2944 rtl
= XEXP (rtl
, 0);
2945 switch (GET_CODE (rtl
))
2948 gcc_assert (REGNO (rtl
) == STACK_POINTER_REGNUM
);
2953 gcc_assert (REGNO (XEXP (rtl
, 0)) == STACK_POINTER_REGNUM
);
2954 offset
= rtx_to_poly_int64 (XEXP (rtl
, 1));
/* MINUS case: stack grows upward relative to the addressing mode,
   so negate the constant term.  */
2958 gcc_assert (REGNO (XEXP (rtl
, 0)) == STACK_POINTER_REGNUM
);
2959 offset
= -rtx_to_poly_int64 (XEXP (rtl
, 1));
2969 /* The return address is at some offset from any value we can
2970 actually load. For instance, on the SPARC it is in %i7+8. Just
2971 ignore the offset for now; it doesn't matter for unwinding frames. */
2972 gcc_assert (CONST_INT_P (XEXP (rtl
, 1)));
2973 initial_return_save (XEXP (rtl
, 0));
2980 if (reg
!= DWARF_FRAME_RETURN_COLUMN
)
2982 if (reg
!= INVALID_REGNUM
)
2983 record_reg_saved_in_reg (rtl
, pc_rtx
);
/* Record the save with a stack offset expressed relative to the CFA.  */
2984 reg_save (DWARF_FRAME_RETURN_COLUMN
, reg
, offset
- cur_row
->cfa
.offset
);
/* create_cie_data: build the CIE row state shared by every FDE in the
   translation unit — CFA at SP + DEFAULT_INCOMING_FRAME_SP_OFFSET, plus
   the initial return-address column when DWARF2 unwind info is in use.
   Also snapshots regs_saved_in_regs into cie_return_save so each function
   can re-seed it.  (Text is a line-mangled extraction; annotated in
   place.)  */
2989 create_cie_data (void)
2991 dw_cfa_location loc
;
2992 dw_trace_info cie_trace
;
2994 dw_stack_pointer_regnum
= DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM
);
2996 memset (&cie_trace
, 0, sizeof (cie_trace
));
2997 cur_trace
= &cie_trace
;
2999 add_cfi_vec
= &cie_cfi_vec
;
3000 cie_cfi_row
= cur_row
= new_cfi_row ();
3002 /* On entry, the Canonical Frame Address is at SP. */
3003 memset (&loc
, 0, sizeof (loc
));
3004 loc
.reg
= dw_stack_pointer_regnum
;
3005 /* create_cie_data is called just once per TU, and when using .cfi_startproc
3006 is even done by the assembler rather than the compiler. If the target
3007 has different incoming frame sp offsets depending on what kind of
3008 function it is, use a single constant offset for the target and
3009 if needed, adjust before the first instruction in insn stream. */
3010 loc
.offset
= DEFAULT_INCOMING_FRAME_SP_OFFSET
;
3013 if (targetm
.debug_unwind_info () == UI_DWARF2
3014 || targetm_common
.except_unwind_info (&global_options
) == UI_DWARF2
)
3016 initial_return_save (INCOMING_RETURN_ADDR_RTX
);
3018 /* For a few targets, we have the return address incoming into a
3019 register, but choose a different return column. This will result
3020 in a DW_CFA_register for the return, and an entry in
3021 regs_saved_in_regs to match. If the target later stores that
3022 return address register to the stack, we want to be able to emit
3023 the DW_CFA_offset against the return column, not the intermediate
3024 save register. Save the contents of regs_saved_in_regs so that
3025 we can re-initialize it at the start of each function. */
3026 switch (cie_trace
.regs_saved_in_regs
.length ())
3031 cie_return_save
= ggc_alloc
<reg_saved_in_data
> ();
3032 *cie_return_save
= cie_trace
.regs_saved_in_regs
[0];
3033 cie_trace
.regs_saved_in_regs
.release ();
/* execute_dwarf2_frame: pass entry point.  Creates CIE data on first use,
   builds the pseudo-CFG of traces, scans them to create NOTE_INSN_CFI
   notes, then frees the per-function trace data.  (The calls to
   connect_traces and the trace_index teardown were dropped by the
   extraction; annotated in place.)  */
3045 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
3046 state at each location within the function. These notes will be
3047 emitted during pass_final. */
3050 execute_dwarf2_frame (void)
3052 /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file. */
3053 dw_frame_pointer_regnum
= DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM
);
3055 /* The first time we're called, compute the incoming frame state. */
3056 if (cie_cfi_vec
== NULL
)
3059 dwarf2out_alloc_current_fde ();
3061 create_pseudo_cfg ();
3064 create_cfi_notes ();
3068 /* Free all the data we allocated. */
3073 FOR_EACH_VEC_ELT (trace_info
, i
, ti
)
3074 ti
->regs_saved_in_regs
.release ();
3076 trace_info
.release ();
/* dwarf_cfi_name: map a DW_CFA_* opcode to its printable name via
   get_DW_CFA_name, falling back to a placeholder for unknown opcodes.  */
3084 /* Convert a DWARF call frame info. operation to its string name */
3087 dwarf_cfi_name (unsigned int cfi_opc
)
3089 const char *name
= get_DW_CFA_name (cfi_opc
);
3094 return "DW_CFA_<unknown>";
/* output_cfa_loc: emit the DWARF expression block for a CFI entry that
   carries a location expression (DW_CFA_expression /
   DW_CFA_val_expression / DW_CFA_def_cfa_expression): optional register
   operand, then uleb128 block size, then the location operations.
   FOR_EH selects EH-frame vs debug-frame register numbering.  */
3097 /* This routine will generate the correct assembly data for a location
3098 description based on a cfi entry with a complex address. */
3101 output_cfa_loc (dw_cfi_ref cfi
, int for_eh
)
3103 dw_loc_descr_ref loc
;
3106 if (cfi
->dw_cfi_opc
== DW_CFA_expression
3107 || cfi
->dw_cfi_opc
== DW_CFA_val_expression
)
3110 DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3111 dw2_asm_output_data (1, r
, NULL
);
3112 loc
= cfi
->dw_cfi_oprnd2
.dw_cfi_loc
;
/* DW_CFA_def_cfa_expression: the expression is the first operand.  */
3115 loc
= cfi
->dw_cfi_oprnd1
.dw_cfi_loc
;
3117 /* Output the size of the block. */
3118 size
= size_of_locs (loc
);
3119 dw2_asm_output_data_uleb128 (size
, NULL
);
3121 /* Now output the operations themselves. */
3122 output_loc_sequence (loc
, for_eh
);
/* output_cfa_loc_raw: like output_cfa_loc, but prints the bytes as a
   comma-separated list suitable for a .cfi_escape directive (always with
   EH register numbering).  */
3125 /* Similar, but used for .cfi_escape. */
3128 output_cfa_loc_raw (dw_cfi_ref cfi
)
3130 dw_loc_descr_ref loc
;
3133 if (cfi
->dw_cfi_opc
== DW_CFA_expression
3134 || cfi
->dw_cfi_opc
== DW_CFA_val_expression
)
3137 DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
3138 fprintf (asm_out_file
, "%#x,", r
);
3139 loc
= cfi
->dw_cfi_oprnd2
.dw_cfi_loc
;
/* DW_CFA_def_cfa_expression: the expression is the first operand.  */
3142 loc
= cfi
->dw_cfi_oprnd1
.dw_cfi_loc
;
3144 /* Output the size of the block. */
3145 size
= size_of_locs (loc
);
3146 dw2_asm_output_data_uleb128_raw (size
);
3147 fputc (',', asm_out_file
);
3149 /* Now output the operations themselves. */
3150 output_loc_sequence_raw (loc
);
/* output_cfi: emit one CFI opcode plus operands as assembly data for a
   hand-built .debug_frame/.eh_frame section.  The three "primary" opcodes
   (advance_loc, offset, restore) pack a 6-bit operand into the opcode byte
   itself; everything else emits the opcode byte followed by uleb/sleb or
   address operands per the DWARF CFI encoding.  FDE tracks the current
   label for advance deltas.  (Text is a line-mangled extraction; annotated
   in place.)  */
3153 /* Output a Call Frame Information opcode and its operand(s). */
3156 output_cfi (dw_cfi_ref cfi
, dw_fde_ref fde
, int for_eh
)
3161 if (cfi
->dw_cfi_opc
== DW_CFA_advance_loc
)
3162 dw2_asm_output_data (1, (cfi
->dw_cfi_opc
3163 | (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
& 0x3f)),
3164 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX
,
3165 ((unsigned HOST_WIDE_INT
)
3166 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
));
3167 else if (cfi
->dw_cfi_opc
== DW_CFA_offset
)
3169 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3170 dw2_asm_output_data (1, (cfi
->dw_cfi_opc
| (r
& 0x3f)),
3171 "DW_CFA_offset, column %#lx", r
);
/* Offsets are stored pre-multiplied; scale back by the data alignment
   factor before emitting.  */
3172 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
3173 dw2_asm_output_data_uleb128 (off
, NULL
);
3175 else if (cfi
->dw_cfi_opc
== DW_CFA_restore
)
3177 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3178 dw2_asm_output_data (1, (cfi
->dw_cfi_opc
| (r
& 0x3f)),
3179 "DW_CFA_restore, column %#lx", r
);
/* All remaining opcodes: emit opcode byte, then dispatch on operands.  */
3183 dw2_asm_output_data (1, cfi
->dw_cfi_opc
,
3184 "%s", dwarf_cfi_name (cfi
->dw_cfi_opc
));
3186 switch (cfi
->dw_cfi_opc
)
3188 case DW_CFA_set_loc
:
3190 dw2_asm_output_encoded_addr_rtx (
3191 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
3192 gen_rtx_SYMBOL_REF (Pmode
, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
),
3195 dw2_asm_output_addr (DWARF2_ADDR_SIZE
,
3196 cfi
->dw_cfi_oprnd1
.dw_cfi_addr
, NULL
);
3197 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
3200 case DW_CFA_advance_loc1
:
3201 dw2_asm_output_delta (1, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
3202 fde
->dw_fde_current_label
, NULL
);
3203 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
3206 case DW_CFA_advance_loc2
:
3207 dw2_asm_output_delta (2, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
3208 fde
->dw_fde_current_label
, NULL
);
3209 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
3212 case DW_CFA_advance_loc4
:
3213 dw2_asm_output_delta (4, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
3214 fde
->dw_fde_current_label
, NULL
);
3215 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
3218 case DW_CFA_MIPS_advance_loc8
:
3219 dw2_asm_output_delta (8, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
3220 fde
->dw_fde_current_label
, NULL
);
3221 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
3224 case DW_CFA_offset_extended
:
3225 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3226 dw2_asm_output_data_uleb128 (r
, NULL
);
3227 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
3228 dw2_asm_output_data_uleb128 (off
, NULL
);
3231 case DW_CFA_def_cfa
:
3232 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3233 dw2_asm_output_data_uleb128 (r
, NULL
);
/* DW_CFA_def_cfa's offset is a plain (unfactored) uleb128.  */
3234 dw2_asm_output_data_uleb128 (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
, NULL
);
3237 case DW_CFA_offset_extended_sf
:
3238 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3239 dw2_asm_output_data_uleb128 (r
, NULL
);
3240 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
3241 dw2_asm_output_data_sleb128 (off
, NULL
);
3244 case DW_CFA_def_cfa_sf
:
3245 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3246 dw2_asm_output_data_uleb128 (r
, NULL
);
3247 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
3248 dw2_asm_output_data_sleb128 (off
, NULL
);
3251 case DW_CFA_restore_extended
:
3252 case DW_CFA_undefined
:
3253 case DW_CFA_same_value
:
3254 case DW_CFA_def_cfa_register
:
3255 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3256 dw2_asm_output_data_uleb128 (r
, NULL
);
3259 case DW_CFA_register
:
3260 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
3261 dw2_asm_output_data_uleb128 (r
, NULL
);
3262 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
, for_eh
);
3263 dw2_asm_output_data_uleb128 (r
, NULL
);
3266 case DW_CFA_def_cfa_offset
:
3267 case DW_CFA_GNU_args_size
:
3268 dw2_asm_output_data_uleb128 (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
, NULL
);
3271 case DW_CFA_def_cfa_offset_sf
:
3272 off
= div_data_align (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
3273 dw2_asm_output_data_sleb128 (off
, NULL
);
3276 case DW_CFA_GNU_window_save
:
3279 case DW_CFA_def_cfa_expression
:
3280 case DW_CFA_expression
:
3281 case DW_CFA_val_expression
:
3282 output_cfa_loc (cfi
, for_eh
);
3285 case DW_CFA_GNU_negative_offset_extended
:
3286 /* Obsoleted by DW_CFA_offset_extended_sf. */
/* output_cfi_directive: emit one CFI entry as a .cfi_* assembler
   directive to F.  F may also be a dump file, in which case the output is
   purely informational (the F == asm_out_file checks distinguish the two
   uses).  Register numbers always use EH numbering here, matching what
   the assembler expects.  (Text is a line-mangled extraction; annotated
   in place.)  */
3295 /* Similar, but do it via assembler directives instead. */
3298 output_cfi_directive (FILE *f
, dw_cfi_ref cfi
)
3300 unsigned long r
, r2
;
3302 switch (cfi
->dw_cfi_opc
)
3304 case DW_CFA_advance_loc
:
3305 case DW_CFA_advance_loc1
:
3306 case DW_CFA_advance_loc2
:
3307 case DW_CFA_advance_loc4
:
3308 case DW_CFA_MIPS_advance_loc8
:
3309 case DW_CFA_set_loc
:
3310 /* Should only be created in a code path not followed when emitting
3311 via directives. The assembler is going to take care of this for
3312 us. But this routines is also used for debugging dumps, so
3314 gcc_assert (f
!= asm_out_file
);
3315 fprintf (f
, "\t.cfi_advance_loc\n");
3319 case DW_CFA_offset_extended
:
3320 case DW_CFA_offset_extended_sf
:
3321 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
3322 fprintf (f
, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC
"\n",
3323 r
, cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
3326 case DW_CFA_restore
:
3327 case DW_CFA_restore_extended
:
3328 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
3329 fprintf (f
, "\t.cfi_restore %lu\n", r
);
3332 case DW_CFA_undefined
:
3333 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
3334 fprintf (f
, "\t.cfi_undefined %lu\n", r
);
3337 case DW_CFA_same_value
:
3338 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
3339 fprintf (f
, "\t.cfi_same_value %lu\n", r
);
3342 case DW_CFA_def_cfa
:
3343 case DW_CFA_def_cfa_sf
:
3344 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
3345 fprintf (f
, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC
"\n",
3346 r
, cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
3349 case DW_CFA_def_cfa_register
:
3350 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
3351 fprintf (f
, "\t.cfi_def_cfa_register %lu\n", r
);
3354 case DW_CFA_register
:
3355 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
3356 r2
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
, 1);
3357 fprintf (f
, "\t.cfi_register %lu, %lu\n", r
, r2
);
3360 case DW_CFA_def_cfa_offset
:
3361 case DW_CFA_def_cfa_offset_sf
:
3362 fprintf (f
, "\t.cfi_def_cfa_offset "
3363 HOST_WIDE_INT_PRINT_DEC
"\n",
3364 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
3367 case DW_CFA_remember_state
:
3368 fprintf (f
, "\t.cfi_remember_state\n");
3370 case DW_CFA_restore_state
:
3371 fprintf (f
, "\t.cfi_restore_state\n");
/* There is no dedicated gas directive for GNU_args_size; emit it as a
   raw .cfi_escape byte sequence when writing real assembly.  */
3374 case DW_CFA_GNU_args_size
:
3375 if (f
== asm_out_file
)
3377 fprintf (f
, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size
);
3378 dw2_asm_output_data_uleb128_raw (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
3380 fprintf (f
, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC
,
3381 ASM_COMMENT_START
, cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
3386 fprintf (f
, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC
"\n",
3387 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
3391 case DW_CFA_GNU_window_save
:
3392 fprintf (f
, "\t.cfi_window_save\n");
3395 case DW_CFA_def_cfa_expression
:
3396 case DW_CFA_expression
:
3397 case DW_CFA_val_expression
:
3398 if (f
!= asm_out_file
)
3400 fprintf (f
, "\t.cfi_%scfa_%sexpression ...\n",
3401 cfi
->dw_cfi_opc
== DW_CFA_def_cfa_expression
? "def_" : "",
3402 cfi
->dw_cfi_opc
== DW_CFA_val_expression
? "val_" : "");
3405 fprintf (f
, "\t.cfi_escape %#x,", cfi
->dw_cfi_opc
);
3406 output_cfa_loc_raw (cfi
);
/* dwarf2out_emit_cfi: emit CFI as an assembler directive, but only when
   CFI-via-directives is enabled for this TU.  */
3416 dwarf2out_emit_cfi (dw_cfi_ref cfi
)
3418 if (dwarf2out_do_cfi_asm ())
3419 output_cfi_directive (asm_out_file
, cfi
);
/* dump_cfi_row: debugging dump of one abstract CFI table row to F.
   Synthesizes a def_cfa opcode for the row's CFA (via def_cfa_0 against a
   cleared dummy location) and then prints each register-save entry as a
   directive.  */
3423 dump_cfi_row (FILE *f
, dw_cfi_row
*row
)
3431 dw_cfa_location dummy
;
3432 memset (&dummy
, 0, sizeof (dummy
));
3433 dummy
.reg
= INVALID_REGNUM
;
3434 cfi
= def_cfa_0 (&dummy
, &row
->cfa
);
3436 output_cfi_directive (f
, cfi
);
3438 FOR_EACH_VEC_SAFE_ELT (row
->reg_save
, i
, cfi
)
3440 output_cfi_directive (f
, cfi
);
/* debug_cfi_row: debugger-callable wrapper that dumps ROW to stderr.  */
3443 void debug_cfi_row (dw_cfi_row
*row
);
3446 debug_cfi_row (dw_cfi_row
*row
)
3448 dump_cfi_row (stderr
, row
);
/* PCH-persistent cache for dwarf2out_do_cfi_asm (see tri-state note
   below), followed by the EH-frame predicate.  */
3452 /* Save the result of dwarf2out_do_frame across PCH.
3453 This variable is tri-state, with 0 unset, >0 true, <0 false. */
3454 static GTY(()) signed char saved_do_cfi_asm
= 0;
3456 /* Decide whether to emit EH frame unwind information for the current
3457 translation unit. */
/* True when unwind tables or exceptions are enabled AND the target's
   exception unwinding model is DWARF2.  */
3460 dwarf2out_do_eh_frame (void)
3463 (flag_unwind_tables
|| flag_exceptions
)
3464 && targetm_common
.except_unwind_info (&global_options
) == UI_DWARF2
;
/* dwarf2out_do_frame: true if any consumer needs frame/CFA information —
   DWARF debug info, a cached positive CFI-asm decision, DWARF2 debug
   unwinding, or DWARF2 EH unwinding.  (The return statements for each
   branch were dropped by the extraction.)  */
3467 /* Decide whether we want to emit frame unwind information for the current
3468 translation unit. */
3471 dwarf2out_do_frame (void)
3473 /* We want to emit correct CFA location expressions or lists, so we
3474 have to return true if we're going to output debug info, even if
3475 we're not going to output frame or unwind info. */
3476 if (write_symbols
== DWARF2_DEBUG
|| write_symbols
== VMS_AND_DWARF2_DEBUG
)
3479 if (saved_do_cfi_asm
> 0)
3482 if (targetm
.debug_unwind_info () == UI_DWARF2
)
3485 if (dwarf2out_do_eh_frame ())
/* dwarf2out_do_cfi_asm: decide (once per TU, cached tri-state in
   saved_do_cfi_asm) whether frame info can be emitted via gas .cfi_*
   directives: requires -fdwarf2-cfi-asm, frame output at all, gas
   personality-directive support, personality/LSDA encodings the
   assembler can represent (absolute or pcrel only), and either
   .cfi_sections support or an EH-frame need.  */
3491 /* Decide whether to emit frame unwind via assembler directives. */
3494 dwarf2out_do_cfi_asm (void)
3498 if (saved_do_cfi_asm
!= 0)
3499 return saved_do_cfi_asm
> 0;
3501 /* Assume failure for a moment. */
3502 saved_do_cfi_asm
= -1;
3504 if (!flag_dwarf2_cfi_asm
|| !dwarf2out_do_frame ())
3506 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE
)
3509 /* Make sure the personality encoding is one the assembler can support.
3510 In particular, aligned addresses can't be handled. */
3511 enc
= ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3512 if ((enc
& 0x70) != 0 && (enc
& 0x70) != DW_EH_PE_pcrel
)
3514 enc
= ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3515 if ((enc
& 0x70) != 0 && (enc
& 0x70) != DW_EH_PE_pcrel
)
3518 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3519 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
3520 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
&& !dwarf2out_do_eh_frame ())
/* All checks passed: cache and report success.  */
3524 saved_do_cfi_asm
= 1;
/* Pass registration data and pass class for the dwarf2 CFI pass, run at
   TV_FINAL time with no required/provided properties.  execute () simply
   forwards to execute_dwarf2_frame.  */
3530 const pass_data pass_data_dwarf2_frame
=
3532 RTL_PASS
, /* type */
3533 "dwarf2", /* name */
3534 OPTGROUP_NONE
, /* optinfo_flags */
3535 TV_FINAL
, /* tv_id */
3536 0, /* properties_required */
3537 0, /* properties_provided */
3538 0, /* properties_destroyed */
3539 0, /* todo_flags_start */
3540 0, /* todo_flags_finish */
3543 class pass_dwarf2_frame
: public rtl_opt_pass
3546 pass_dwarf2_frame (gcc::context
*ctxt
)
3547 : rtl_opt_pass (pass_data_dwarf2_frame
, ctxt
)
3550 /* opt_pass methods: */
3551 virtual bool gate (function
*);
3552 virtual unsigned int execute (function
*) { return execute_dwarf2_frame (); }
3554 }; // class pass_dwarf2_frame
/* gate: run the pass only for targets with RTL prologues and only when
   dwarf2 frame info is wanted.  */
3557 pass_dwarf2_frame::gate (function
*)
3559 /* Targets which still implement the prologue in assembler text
3560 cannot use the generic dwarf2 unwinding. */
3561 if (!targetm
.have_prologue ())
3564 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3565 from the optimized shrink-wrapping annotations that we will compute.
3566 For now, only produce the CFI notes for dwarf2. */
3567 return dwarf2out_do_frame ();
/* Factory for the pass manager: allocate a pass_dwarf2_frame instance.  */
3573 make_pass_dwarf2_frame (gcc::context
*ctxt
)
3575 return new pass_dwarf2_frame (ctxt
);
3578 #include "gt-dwarf2cfi.h"